ib_verbs.c

  1. /*
  2. * Broadcom NetXtreme-E RoCE driver.
  3. *
  4. * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
  5. * Broadcom refers to Broadcom Limited and/or its subsidiaries.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. *
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33. * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34. * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35. *
  36. * Description: IB Verbs interpreter
  37. */
  38. #include <linux/interrupt.h>
  39. #include <linux/types.h>
  40. #include <linux/pci.h>
  41. #include <linux/netdevice.h>
  42. #include <linux/if_ether.h>
  43. #include <rdma/ib_verbs.h>
  44. #include <rdma/ib_user_verbs.h>
  45. #include <rdma/ib_umem.h>
  46. #include <rdma/ib_addr.h>
  47. #include <rdma/ib_mad.h>
  48. #include <rdma/ib_cache.h>
  49. #include "bnxt_ulp.h"
  50. #include "roce_hsi.h"
  51. #include "qplib_res.h"
  52. #include "qplib_sp.h"
  53. #include "qplib_fp.h"
  54. #include "qplib_rcfw.h"
  55. #include "bnxt_re.h"
  56. #include "ib_verbs.h"
  57. #include <rdma/bnxt_re-abi.h>
  58. static int __from_ib_access_flags(int iflags)
  59. {
  60. int qflags = 0;
  61. if (iflags & IB_ACCESS_LOCAL_WRITE)
  62. qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
  63. if (iflags & IB_ACCESS_REMOTE_READ)
  64. qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
  65. if (iflags & IB_ACCESS_REMOTE_WRITE)
  66. qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
  67. if (iflags & IB_ACCESS_REMOTE_ATOMIC)
  68. qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
  69. if (iflags & IB_ACCESS_MW_BIND)
  70. qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
  71. if (iflags & IB_ZERO_BASED)
  72. qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
  73. if (iflags & IB_ACCESS_ON_DEMAND)
  74. qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
  75. return qflags;
  76. };
  77. static enum ib_access_flags __to_ib_access_flags(int qflags)
  78. {
  79. enum ib_access_flags iflags = 0;
  80. if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
  81. iflags |= IB_ACCESS_LOCAL_WRITE;
  82. if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
  83. iflags |= IB_ACCESS_REMOTE_WRITE;
  84. if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
  85. iflags |= IB_ACCESS_REMOTE_READ;
  86. if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
  87. iflags |= IB_ACCESS_REMOTE_ATOMIC;
  88. if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
  89. iflags |= IB_ACCESS_MW_BIND;
  90. if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
  91. iflags |= IB_ZERO_BASED;
  92. if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
  93. iflags |= IB_ACCESS_ON_DEMAND;
  94. return iflags;
  95. };
  96. static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
  97. struct bnxt_qplib_sge *sg_list, int num)
  98. {
  99. int i, total = 0;
  100. for (i = 0; i < num; i++) {
  101. sg_list[i].addr = ib_sg_list[i].addr;
  102. sg_list[i].lkey = ib_sg_list[i].lkey;
  103. sg_list[i].size = ib_sg_list[i].length;
  104. total += sg_list[i].size;
  105. }
  106. return total;
  107. }
  108. /* Device */
  109. struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
  110. {
  111. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  112. struct net_device *netdev = NULL;
  113. rcu_read_lock();
  114. if (rdev)
  115. netdev = rdev->netdev;
  116. if (netdev)
  117. dev_hold(netdev);
  118. rcu_read_unlock();
  119. return netdev;
  120. }
  121. int bnxt_re_query_device(struct ib_device *ibdev,
  122. struct ib_device_attr *ib_attr,
  123. struct ib_udata *udata)
  124. {
  125. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  126. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  127. memset(ib_attr, 0, sizeof(*ib_attr));
  128. memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
  129. min(sizeof(dev_attr->fw_ver),
  130. sizeof(ib_attr->fw_ver)));
  131. bnxt_qplib_get_guid(rdev->netdev->dev_addr,
  132. (u8 *)&ib_attr->sys_image_guid);
  133. ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
  134. ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
  135. ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
  136. ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
  137. ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
  138. ib_attr->max_qp = dev_attr->max_qp;
  139. ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
  140. ib_attr->device_cap_flags =
  141. IB_DEVICE_CURR_QP_STATE_MOD
  142. | IB_DEVICE_RC_RNR_NAK_GEN
  143. | IB_DEVICE_SHUTDOWN_PORT
  144. | IB_DEVICE_SYS_IMAGE_GUID
  145. | IB_DEVICE_LOCAL_DMA_LKEY
  146. | IB_DEVICE_RESIZE_MAX_WR
  147. | IB_DEVICE_PORT_ACTIVE_EVENT
  148. | IB_DEVICE_N_NOTIFY_CQ
  149. | IB_DEVICE_MEM_WINDOW
  150. | IB_DEVICE_MEM_WINDOW_TYPE_2B
  151. | IB_DEVICE_MEM_MGT_EXTENSIONS;
  152. ib_attr->max_send_sge = dev_attr->max_qp_sges;
  153. ib_attr->max_recv_sge = dev_attr->max_qp_sges;
  154. ib_attr->max_sge_rd = dev_attr->max_qp_sges;
  155. ib_attr->max_cq = dev_attr->max_cq;
  156. ib_attr->max_cqe = dev_attr->max_cq_wqes;
  157. ib_attr->max_mr = dev_attr->max_mr;
  158. ib_attr->max_pd = dev_attr->max_pd;
  159. ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
  160. ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
  161. ib_attr->atomic_cap = IB_ATOMIC_NONE;
  162. ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
  163. ib_attr->max_ee_rd_atom = 0;
  164. ib_attr->max_res_rd_atom = 0;
  165. ib_attr->max_ee_init_rd_atom = 0;
  166. ib_attr->max_ee = 0;
  167. ib_attr->max_rdd = 0;
  168. ib_attr->max_mw = dev_attr->max_mw;
  169. ib_attr->max_raw_ipv6_qp = 0;
  170. ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
  171. ib_attr->max_mcast_grp = 0;
  172. ib_attr->max_mcast_qp_attach = 0;
  173. ib_attr->max_total_mcast_qp_attach = 0;
  174. ib_attr->max_ah = dev_attr->max_ah;
  175. ib_attr->max_fmr = 0;
  176. ib_attr->max_map_per_fmr = 0;
  177. ib_attr->max_srq = dev_attr->max_srq;
  178. ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
  179. ib_attr->max_srq_sge = dev_attr->max_srq_sges;
  180. ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
  181. ib_attr->max_pkeys = 1;
  182. ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
  183. return 0;
  184. }
  185. int bnxt_re_modify_device(struct ib_device *ibdev,
  186. int device_modify_mask,
  187. struct ib_device_modify *device_modify)
  188. {
  189. switch (device_modify_mask) {
  190. case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
  191. /* Modifying the GUID requires modification of the GID table */
  192. /* GUID should be made as READ-ONLY */
  193. break;
  194. case IB_DEVICE_MODIFY_NODE_DESC:
  195. /* Node Desc should be made as READ-ONLY */
  196. break;
  197. default:
  198. break;
  199. }
  200. return 0;
  201. }
  202. /* Port */
  203. int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
  204. struct ib_port_attr *port_attr)
  205. {
  206. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  207. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  208. memset(port_attr, 0, sizeof(*port_attr));
  209. if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
  210. port_attr->state = IB_PORT_ACTIVE;
  211. port_attr->phys_state = 5;
  212. } else {
  213. port_attr->state = IB_PORT_DOWN;
  214. port_attr->phys_state = 3;
  215. }
  216. port_attr->max_mtu = IB_MTU_4096;
  217. port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
  218. port_attr->gid_tbl_len = dev_attr->max_sgid;
  219. port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
  220. IB_PORT_DEVICE_MGMT_SUP |
  221. IB_PORT_VENDOR_CLASS_SUP;
  222. port_attr->ip_gids = true;
  223. port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
  224. port_attr->bad_pkey_cntr = 0;
  225. port_attr->qkey_viol_cntr = 0;
  226. port_attr->pkey_tbl_len = dev_attr->max_pkey;
  227. port_attr->lid = 0;
  228. port_attr->sm_lid = 0;
  229. port_attr->lmc = 0;
  230. port_attr->max_vl_num = 4;
  231. port_attr->sm_sl = 0;
  232. port_attr->subnet_timeout = 0;
  233. port_attr->init_type_reply = 0;
  234. port_attr->active_speed = rdev->active_speed;
  235. port_attr->active_width = rdev->active_width;
  236. return 0;
  237. }
  238. int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
  239. struct ib_port_immutable *immutable)
  240. {
  241. struct ib_port_attr port_attr;
  242. if (bnxt_re_query_port(ibdev, port_num, &port_attr))
  243. return -EINVAL;
  244. immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
  245. immutable->gid_tbl_len = port_attr.gid_tbl_len;
  246. immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
  247. immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
  248. immutable->max_mad_size = IB_MGMT_MAD_SIZE;
  249. return 0;
  250. }
  251. void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
  252. {
  253. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  254. snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
  255. rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
  256. rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
  257. }
  258. int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
  259. u16 index, u16 *pkey)
  260. {
  261. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  262. /* Ignore port_num */
  263. memset(pkey, 0, sizeof(*pkey));
  264. return bnxt_qplib_get_pkey(&rdev->qplib_res,
  265. &rdev->qplib_res.pkey_tbl, index, pkey);
  266. }
  267. int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
  268. int index, union ib_gid *gid)
  269. {
  270. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  271. int rc = 0;
  272. /* Ignore port_num */
  273. memset(gid, 0, sizeof(*gid));
  274. rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
  275. &rdev->qplib_res.sgid_tbl, index,
  276. (struct bnxt_qplib_gid *)gid);
  277. return rc;
  278. }
  279. int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
  280. {
  281. int rc = 0;
  282. struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
  283. struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
  284. struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
  285. struct bnxt_qplib_gid *gid_to_del;
  286. /* Delete the entry from the hardware */
  287. ctx = *context;
  288. if (!ctx)
  289. return -EINVAL;
  290. if (sgid_tbl && sgid_tbl->active) {
  291. if (ctx->idx >= sgid_tbl->max)
  292. return -EINVAL;
  293. gid_to_del = &sgid_tbl->tbl[ctx->idx];
  294. /* DEL_GID is called in WQ context (netdevice_event_work_handler)
  295. * or via the ib_unregister_device path. In the former case QP1
  296. * may not be destroyed yet, in which case just return as FW
  297. * needs that entry to be present and will fail its deletion.
  298. * We could get invoked again after QP1 is destroyed OR get an
  299. * ADD_GID call with a different GID value for the same index
  300. * where we issue MODIFY_GID cmd to update the GID entry -- TBD
  301. */
  302. if (ctx->idx == 0 &&
  303. rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
  304. ctx->refcnt == 1 && rdev->qp1_sqp) {
  305. dev_dbg(rdev_to_dev(rdev),
  306. "Trying to delete GID0 while QP1 is alive\n");
  307. return -EFAULT;
  308. }
  309. ctx->refcnt--;
  310. if (!ctx->refcnt) {
  311. rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
  312. if (rc) {
  313. dev_err(rdev_to_dev(rdev),
  314. "Failed to remove GID: %#x", rc);
  315. } else {
  316. ctx_tbl = sgid_tbl->ctx;
  317. ctx_tbl[ctx->idx] = NULL;
  318. kfree(ctx);
  319. }
  320. }
  321. } else {
  322. return -EINVAL;
  323. }
  324. return rc;
  325. }
  326. int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
  327. {
  328. int rc;
  329. u32 tbl_idx = 0;
  330. u16 vlan_id = 0xFFFF;
  331. struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
  332. struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
  333. struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
  334. if ((attr->ndev) && is_vlan_dev(attr->ndev))
  335. vlan_id = vlan_dev_vlan_id(attr->ndev);
  336. rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
  337. rdev->qplib_res.netdev->dev_addr,
  338. vlan_id, true, &tbl_idx);
  339. if (rc == -EALREADY) {
  340. ctx_tbl = sgid_tbl->ctx;
  341. ctx_tbl[tbl_idx]->refcnt++;
  342. *context = ctx_tbl[tbl_idx];
  343. return 0;
  344. }
  345. if (rc < 0) {
  346. dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
  347. return rc;
  348. }
  349. ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
  350. if (!ctx)
  351. return -ENOMEM;
  352. ctx_tbl = sgid_tbl->ctx;
  353. ctx->idx = tbl_idx;
  354. ctx->refcnt = 1;
  355. ctx_tbl[tbl_idx] = ctx;
  356. *context = ctx;
  357. return rc;
  358. }
  359. enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
  360. u8 port_num)
  361. {
  362. return IB_LINK_LAYER_ETHERNET;
  363. }
  364. #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
  365. static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
  366. {
  367. struct bnxt_re_fence_data *fence = &pd->fence;
  368. struct ib_mr *ib_mr = &fence->mr->ib_mr;
  369. struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
  370. memset(wqe, 0, sizeof(*wqe));
  371. wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
  372. wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
  373. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  374. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  375. wqe->bind.zero_based = false;
  376. wqe->bind.parent_l_key = ib_mr->lkey;
  377. wqe->bind.va = (u64)(unsigned long)fence->va;
  378. wqe->bind.length = fence->size;
  379. wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
  380. wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
  381. /* Save the initial rkey in fence structure for now;
  382. * wqe->bind.r_key will be set at (re)bind time.
  383. */
  384. fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
  385. }
  386. static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
  387. {
  388. struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
  389. qplib_qp);
  390. struct ib_pd *ib_pd = qp->ib_qp.pd;
  391. struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
  392. struct bnxt_re_fence_data *fence = &pd->fence;
  393. struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
  394. struct bnxt_qplib_swqe wqe;
  395. int rc;
  396. memcpy(&wqe, fence_wqe, sizeof(wqe));
  397. wqe.bind.r_key = fence->bind_rkey;
  398. fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
  399. dev_dbg(rdev_to_dev(qp->rdev),
  400. "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
  401. wqe.bind.r_key, qp->qplib_qp.id, pd);
  402. rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
  403. if (rc) {
  404. dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
  405. return rc;
  406. }
  407. bnxt_qplib_post_send_db(&qp->qplib_qp);
  408. return rc;
  409. }
  410. static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
  411. {
  412. struct bnxt_re_fence_data *fence = &pd->fence;
  413. struct bnxt_re_dev *rdev = pd->rdev;
  414. struct device *dev = &rdev->en_dev->pdev->dev;
  415. struct bnxt_re_mr *mr = fence->mr;
  416. if (fence->mw) {
  417. bnxt_re_dealloc_mw(fence->mw);
  418. fence->mw = NULL;
  419. }
  420. if (mr) {
  421. if (mr->ib_mr.rkey)
  422. bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
  423. true);
  424. if (mr->ib_mr.lkey)
  425. bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
  426. kfree(mr);
  427. fence->mr = NULL;
  428. }
  429. if (fence->dma_addr) {
  430. dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
  431. DMA_BIDIRECTIONAL);
  432. fence->dma_addr = 0;
  433. }
  434. }
  435. static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
  436. {
  437. int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
  438. struct bnxt_re_fence_data *fence = &pd->fence;
  439. struct bnxt_re_dev *rdev = pd->rdev;
  440. struct device *dev = &rdev->en_dev->pdev->dev;
  441. struct bnxt_re_mr *mr = NULL;
  442. dma_addr_t dma_addr = 0;
  443. struct ib_mw *mw;
  444. u64 pbl_tbl;
  445. int rc;
  446. dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
  447. DMA_BIDIRECTIONAL);
  448. rc = dma_mapping_error(dev, dma_addr);
  449. if (rc) {
  450. dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
  451. rc = -EIO;
  452. fence->dma_addr = 0;
  453. goto fail;
  454. }
  455. fence->dma_addr = dma_addr;
  456. /* Allocate a MR */
  457. mr = kzalloc(sizeof(*mr), GFP_KERNEL);
  458. if (!mr) {
  459. rc = -ENOMEM;
  460. goto fail;
  461. }
  462. fence->mr = mr;
  463. mr->rdev = rdev;
  464. mr->qplib_mr.pd = &pd->qplib_pd;
  465. mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
  466. mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
  467. rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
  468. if (rc) {
  469. dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
  470. goto fail;
  471. }
  472. /* Register MR */
  473. mr->ib_mr.lkey = mr->qplib_mr.lkey;
  474. mr->qplib_mr.va = (u64)(unsigned long)fence->va;
  475. mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
  476. pbl_tbl = dma_addr;
  477. rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
  478. BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
  479. if (rc) {
  480. dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
  481. goto fail;
  482. }
  483. mr->ib_mr.rkey = mr->qplib_mr.rkey;
  484. /* Create a fence MW only for kernel consumers */
  485. mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
  486. if (IS_ERR(mw)) {
  487. dev_err(rdev_to_dev(rdev),
  488. "Failed to create fence-MW for PD: %p\n", pd);
  489. rc = PTR_ERR(mw);
  490. goto fail;
  491. }
  492. fence->mw = mw;
  493. bnxt_re_create_fence_wqe(pd);
  494. return 0;
  495. fail:
  496. bnxt_re_destroy_fence_mr(pd);
  497. return rc;
  498. }
  499. /* Protection Domains */
  500. int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
  501. {
  502. struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
  503. struct bnxt_re_dev *rdev = pd->rdev;
  504. int rc;
  505. bnxt_re_destroy_fence_mr(pd);
  506. if (pd->qplib_pd.id) {
  507. rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
  508. &rdev->qplib_res.pd_tbl,
  509. &pd->qplib_pd);
  510. if (rc)
  511. dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
  512. }
  513. kfree(pd);
  514. return 0;
  515. }
  516. struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
  517. struct ib_ucontext *ucontext,
  518. struct ib_udata *udata)
  519. {
  520. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  521. struct bnxt_re_ucontext *ucntx = container_of(ucontext,
  522. struct bnxt_re_ucontext,
  523. ib_uctx);
  524. struct bnxt_re_pd *pd;
  525. int rc;
  526. pd = kzalloc(sizeof(*pd), GFP_KERNEL);
  527. if (!pd)
  528. return ERR_PTR(-ENOMEM);
  529. pd->rdev = rdev;
  530. if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
  531. dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
  532. rc = -ENOMEM;
  533. goto fail;
  534. }
  535. if (udata) {
  536. struct bnxt_re_pd_resp resp;
  537. if (!ucntx->dpi.dbr) {
  538. /* Allocate the DPI in alloc_pd to avoid failures of
  539. * ibv_devinfo and related applications when DPIs
  540. * are depleted.
  541. */
  542. if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
  543. &ucntx->dpi, ucntx)) {
  544. rc = -ENOMEM;
  545. goto dbfail;
  546. }
  547. }
  548. resp.pdid = pd->qplib_pd.id;
  549. /* Still allow mapping this DBR to the new user PD. */
  550. resp.dpi = ucntx->dpi.dpi;
  551. resp.dbr = (u64)ucntx->dpi.umdbr;
  552. rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
  553. if (rc) {
  554. dev_err(rdev_to_dev(rdev),
  555. "Failed to copy user response\n");
  556. goto dbfail;
  557. }
  558. }
  559. if (!udata)
  560. if (bnxt_re_create_fence_mr(pd))
  561. dev_warn(rdev_to_dev(rdev),
  562. "Failed to create Fence-MR\n");
  563. return &pd->ib_pd;
  564. dbfail:
  565. (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
  566. &pd->qplib_pd);
  567. fail:
  568. kfree(pd);
  569. return ERR_PTR(rc);
  570. }
  571. /* Address Handles */
  572. int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
  573. {
  574. struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
  575. struct bnxt_re_dev *rdev = ah->rdev;
  576. int rc;
  577. rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
  578. if (rc) {
  579. dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
  580. return rc;
  581. }
  582. kfree(ah);
  583. return 0;
  584. }
  585. struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
  586. struct rdma_ah_attr *ah_attr,
  587. struct ib_udata *udata)
  588. {
  589. struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
  590. struct bnxt_re_dev *rdev = pd->rdev;
  591. struct bnxt_re_ah *ah;
  592. const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
  593. int rc;
  594. u8 nw_type;
  595. if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
  596. dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
  597. return ERR_PTR(-EINVAL);
  598. }
  599. ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
  600. if (!ah)
  601. return ERR_PTR(-ENOMEM);
  602. ah->rdev = rdev;
  603. ah->qplib_ah.pd = &pd->qplib_pd;
  604. /* Supply the configuration for the HW */
  605. memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
  606. sizeof(union ib_gid));
  607. /*
  608. * If RoCE V2 is enabled, the stack will have two entries for
  609. * each GID entry. Avoid this duplicate entry in HW by dividing
  610. * the GID index by 2 for RoCE V2.
  611. */
  612. ah->qplib_ah.sgid_index = grh->sgid_index / 2;
  613. ah->qplib_ah.host_sgid_index = grh->sgid_index;
  614. ah->qplib_ah.traffic_class = grh->traffic_class;
  615. ah->qplib_ah.flow_label = grh->flow_label;
  616. ah->qplib_ah.hop_limit = grh->hop_limit;
  617. ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
  618. if (ib_pd->uobject &&
  619. !rdma_is_multicast_addr((struct in6_addr *)
  620. grh->dgid.raw) &&
  621. !rdma_link_local_addr((struct in6_addr *)
  622. grh->dgid.raw)) {
  623. const struct ib_gid_attr *sgid_attr;
  624. sgid_attr = grh->sgid_attr;
  625. /* Get network header type for this GID */
  626. nw_type = rdma_gid_attr_network_type(sgid_attr);
  627. switch (nw_type) {
  628. case RDMA_NETWORK_IPV4:
  629. ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
  630. break;
  631. case RDMA_NETWORK_IPV6:
  632. ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
  633. break;
  634. default:
  635. ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
  636. break;
  637. }
  638. }
  639. memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
  640. rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
  641. if (rc) {
  642. dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
  643. goto fail;
  644. }
  645. /* Write AVID to shared page. */
  646. if (ib_pd->uobject) {
  647. struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
  648. struct bnxt_re_ucontext *uctx;
  649. unsigned long flag;
  650. u32 *wrptr;
  651. uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
  652. spin_lock_irqsave(&uctx->sh_lock, flag);
  653. wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
  654. *wrptr = ah->qplib_ah.id;
  655. wmb(); /* make sure cache is updated. */
  656. spin_unlock_irqrestore(&uctx->sh_lock, flag);
  657. }
  658. return &ah->ib_ah;
  659. fail:
  660. kfree(ah);
  661. return ERR_PTR(rc);
  662. }
  663. int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
  664. {
  665. return 0;
  666. }
  667. int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
  668. {
  669. struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
  670. ah_attr->type = ib_ah->type;
  671. rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
  672. memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
  673. rdma_ah_set_grh(ah_attr, NULL, 0,
  674. ah->qplib_ah.host_sgid_index,
  675. 0, ah->qplib_ah.traffic_class);
  676. rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
  677. rdma_ah_set_port_num(ah_attr, 1);
  678. rdma_ah_set_static_rate(ah_attr, 0);
  679. return 0;
  680. }
  681. unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
  682. __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
  683. {
  684. unsigned long flags;
  685. spin_lock_irqsave(&qp->scq->cq_lock, flags);
  686. if (qp->rcq != qp->scq)
  687. spin_lock(&qp->rcq->cq_lock);
  688. else
  689. __acquire(&qp->rcq->cq_lock);
  690. return flags;
  691. }
  692. void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
  693. unsigned long flags)
  694. __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
  695. {
  696. if (qp->rcq != qp->scq)
  697. spin_unlock(&qp->rcq->cq_lock);
  698. else
  699. __release(&qp->rcq->cq_lock);
  700. spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
  701. }
  702. /* Queue Pairs */
  703. int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
  704. {
  705. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  706. struct bnxt_re_dev *rdev = qp->rdev;
  707. int rc;
  708. unsigned int flags;
  709. bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
  710. rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
  711. if (rc) {
  712. dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
  713. return rc;
  714. }
  715. flags = bnxt_re_lock_cqs(qp);
  716. bnxt_qplib_clean_qp(&qp->qplib_qp);
  717. bnxt_re_unlock_cqs(qp, flags);
  718. bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
  719. if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
  720. rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
  721. &rdev->sqp_ah->qplib_ah);
  722. if (rc) {
  723. dev_err(rdev_to_dev(rdev),
  724. "Failed to destroy HW AH for shadow QP");
  725. return rc;
  726. }
  727. bnxt_qplib_clean_qp(&qp->qplib_qp);
  728. rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
  729. &rdev->qp1_sqp->qplib_qp);
  730. if (rc) {
  731. dev_err(rdev_to_dev(rdev),
  732. "Failed to destroy Shadow QP");
  733. return rc;
  734. }
  735. mutex_lock(&rdev->qp_lock);
  736. list_del(&rdev->qp1_sqp->list);
  737. atomic_dec(&rdev->qp_count);
  738. mutex_unlock(&rdev->qp_lock);
  739. kfree(rdev->sqp_ah);
  740. kfree(rdev->qp1_sqp);
  741. rdev->qp1_sqp = NULL;
  742. rdev->sqp_ah = NULL;
  743. }
  744. if (!IS_ERR_OR_NULL(qp->rumem))
  745. ib_umem_release(qp->rumem);
  746. if (!IS_ERR_OR_NULL(qp->sumem))
  747. ib_umem_release(qp->sumem);
  748. mutex_lock(&rdev->qp_lock);
  749. list_del(&qp->list);
  750. atomic_dec(&rdev->qp_count);
  751. mutex_unlock(&rdev->qp_lock);
  752. kfree(qp);
  753. return 0;
  754. }
  755. static u8 __from_ib_qp_type(enum ib_qp_type type)
  756. {
  757. switch (type) {
  758. case IB_QPT_GSI:
  759. return CMDQ_CREATE_QP1_TYPE_GSI;
  760. case IB_QPT_RC:
  761. return CMDQ_CREATE_QP_TYPE_RC;
  762. case IB_QPT_UD:
  763. return CMDQ_CREATE_QP_TYPE_UD;
  764. default:
  765. return IB_QPT_MAX;
  766. }
  767. }
  768. static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
  769. struct bnxt_re_qp *qp, struct ib_udata *udata)
  770. {
  771. struct bnxt_re_qp_req ureq;
  772. struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
  773. struct ib_umem *umem;
  774. int bytes = 0;
  775. struct ib_ucontext *context = pd->ib_pd.uobject->context;
  776. struct bnxt_re_ucontext *cntx = container_of(context,
  777. struct bnxt_re_ucontext,
  778. ib_uctx);
  779. if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
  780. return -EFAULT;
  781. bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
  782. /* Consider mapping PSN search memory only for RC QPs. */
  783. if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
  784. bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
  785. bytes = PAGE_ALIGN(bytes);
  786. umem = ib_umem_get(context, ureq.qpsva, bytes,
  787. IB_ACCESS_LOCAL_WRITE, 1);
  788. if (IS_ERR(umem))
  789. return PTR_ERR(umem);
  790. qp->sumem = umem;
  791. qplib_qp->sq.sglist = umem->sg_head.sgl;
  792. qplib_qp->sq.nmap = umem->nmap;
  793. qplib_qp->qp_handle = ureq.qp_handle;
  794. if (!qp->qplib_qp.srq) {
  795. bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
  796. bytes = PAGE_ALIGN(bytes);
  797. umem = ib_umem_get(context, ureq.qprva, bytes,
  798. IB_ACCESS_LOCAL_WRITE, 1);
  799. if (IS_ERR(umem))
  800. goto rqfail;
  801. qp->rumem = umem;
  802. qplib_qp->rq.sglist = umem->sg_head.sgl;
  803. qplib_qp->rq.nmap = umem->nmap;
  804. }
  805. qplib_qp->dpi = &cntx->dpi;
  806. return 0;
  807. rqfail:
  808. ib_umem_release(qp->sumem);
  809. qp->sumem = NULL;
  810. qplib_qp->sq.sglist = NULL;
  811. qplib_qp->sq.nmap = 0;
  812. return PTR_ERR(umem);
  813. }
  814. static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
  815. (struct bnxt_re_pd *pd,
  816. struct bnxt_qplib_res *qp1_res,
  817. struct bnxt_qplib_qp *qp1_qp)
  818. {
  819. struct bnxt_re_dev *rdev = pd->rdev;
  820. struct bnxt_re_ah *ah;
  821. union ib_gid sgid;
  822. int rc;
  823. ah = kzalloc(sizeof(*ah), GFP_KERNEL);
  824. if (!ah)
  825. return NULL;
  826. ah->rdev = rdev;
  827. ah->qplib_ah.pd = &pd->qplib_pd;
  828. rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
  829. if (rc)
  830. goto fail;
  831. /* supply the dgid data same as sgid */
  832. memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
  833. sizeof(union ib_gid));
  834. ah->qplib_ah.sgid_index = 0;
  835. ah->qplib_ah.traffic_class = 0;
  836. ah->qplib_ah.flow_label = 0;
  837. ah->qplib_ah.hop_limit = 1;
  838. ah->qplib_ah.sl = 0;
  839. /* Have DMAC same as SMAC */
  840. ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
  841. rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
  842. if (rc) {
  843. dev_err(rdev_to_dev(rdev),
  844. "Failed to allocate HW AH for Shadow QP");
  845. goto fail;
  846. }
  847. return ah;
  848. fail:
  849. kfree(ah);
  850. return NULL;
  851. }
  852. static struct bnxt_re_qp *bnxt_re_create_shadow_qp
  853. (struct bnxt_re_pd *pd,
  854. struct bnxt_qplib_res *qp1_res,
  855. struct bnxt_qplib_qp *qp1_qp)
  856. {
  857. struct bnxt_re_dev *rdev = pd->rdev;
  858. struct bnxt_re_qp *qp;
  859. int rc;
  860. qp = kzalloc(sizeof(*qp), GFP_KERNEL);
  861. if (!qp)
  862. return NULL;
  863. qp->rdev = rdev;
  864. /* Initialize the shadow QP structure from the QP1 values */
  865. ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
  866. qp->qplib_qp.pd = &pd->qplib_pd;
  867. qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
  868. qp->qplib_qp.type = IB_QPT_UD;
  869. qp->qplib_qp.max_inline_data = 0;
  870. qp->qplib_qp.sig_type = true;
  871. /* Shadow QP SQ depth should be same as QP1 RQ depth */
  872. qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
  873. qp->qplib_qp.sq.max_sge = 2;
  874. /* Q full delta can be 1 since it is internal QP */
  875. qp->qplib_qp.sq.q_full_delta = 1;
  876. qp->qplib_qp.scq = qp1_qp->scq;
  877. qp->qplib_qp.rcq = qp1_qp->rcq;
  878. qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
  879. qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
  880. /* Q full delta can be 1 since it is internal QP */
  881. qp->qplib_qp.rq.q_full_delta = 1;
  882. qp->qplib_qp.mtu = qp1_qp->mtu;
  883. qp->qplib_qp.sq_hdr_buf_size = 0;
  884. qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
  885. qp->qplib_qp.dpi = &rdev->dpi_privileged;
  886. rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
  887. if (rc)
  888. goto fail;
  889. rdev->sqp_id = qp->qplib_qp.id;
  890. spin_lock_init(&qp->sq_lock);
  891. INIT_LIST_HEAD(&qp->list);
  892. mutex_lock(&rdev->qp_lock);
  893. list_add_tail(&qp->list, &rdev->qp_list);
  894. atomic_inc(&rdev->qp_count);
  895. mutex_unlock(&rdev->qp_lock);
  896. return qp;
  897. fail:
  898. kfree(qp);
  899. return NULL;
  900. }
  901. struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
  902. struct ib_qp_init_attr *qp_init_attr,
  903. struct ib_udata *udata)
  904. {
  905. struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
  906. struct bnxt_re_dev *rdev = pd->rdev;
  907. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  908. struct bnxt_re_qp *qp;
  909. struct bnxt_re_cq *cq;
  910. struct bnxt_re_srq *srq;
  911. int rc, entries;
  912. if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
  913. (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
  914. (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
  915. (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
  916. (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
  917. return ERR_PTR(-EINVAL);
  918. qp = kzalloc(sizeof(*qp), GFP_KERNEL);
  919. if (!qp)
  920. return ERR_PTR(-ENOMEM);
  921. qp->rdev = rdev;
  922. ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
  923. qp->qplib_qp.pd = &pd->qplib_pd;
  924. qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
  925. qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
  926. if (qp->qplib_qp.type == IB_QPT_MAX) {
  927. dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
  928. qp->qplib_qp.type);
  929. rc = -EINVAL;
  930. goto fail;
  931. }
  932. qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
  933. qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
  934. IB_SIGNAL_ALL_WR) ? true : false);
  935. qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
  936. if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
  937. qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
  938. if (qp_init_attr->send_cq) {
  939. cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
  940. ib_cq);
  941. if (!cq) {
  942. dev_err(rdev_to_dev(rdev), "Send CQ not found");
  943. rc = -EINVAL;
  944. goto fail;
  945. }
  946. qp->qplib_qp.scq = &cq->qplib_cq;
  947. qp->scq = cq;
  948. }
  949. if (qp_init_attr->recv_cq) {
  950. cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
  951. ib_cq);
  952. if (!cq) {
  953. dev_err(rdev_to_dev(rdev), "Receive CQ not found");
  954. rc = -EINVAL;
  955. goto fail;
  956. }
  957. qp->qplib_qp.rcq = &cq->qplib_cq;
  958. qp->rcq = cq;
  959. }
  960. if (qp_init_attr->srq) {
  961. srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
  962. ib_srq);
  963. if (!srq) {
  964. dev_err(rdev_to_dev(rdev), "SRQ not found");
  965. rc = -EINVAL;
  966. goto fail;
  967. }
  968. qp->qplib_qp.srq = &srq->qplib_srq;
  969. qp->qplib_qp.rq.max_wqe = 0;
  970. } else {
  971. /* Allocate 1 more than what's provided so posting max doesn't
  972. * mean empty
  973. */
  974. entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
  975. qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
  976. dev_attr->max_qp_wqes + 1);
  977. qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
  978. qp_init_attr->cap.max_recv_wr;
  979. qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
  980. if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
  981. qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
  982. }
  983. qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
  984. if (qp_init_attr->qp_type == IB_QPT_GSI) {
  985. /* Allocate 1 more than what's provided */
  986. entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
  987. qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
  988. dev_attr->max_qp_wqes + 1);
  989. qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
  990. qp_init_attr->cap.max_send_wr;
  991. qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
  992. if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
  993. qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
  994. qp->qplib_qp.sq.max_sge++;
  995. if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
  996. qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
  997. qp->qplib_qp.rq_hdr_buf_size =
  998. BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
  999. qp->qplib_qp.sq_hdr_buf_size =
  1000. BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
  1001. qp->qplib_qp.dpi = &rdev->dpi_privileged;
  1002. rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
  1003. if (rc) {
  1004. dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
  1005. goto fail;
  1006. }
  1007. /* Create a shadow QP to handle the QP1 traffic */
  1008. rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
  1009. &qp->qplib_qp);
  1010. if (!rdev->qp1_sqp) {
  1011. rc = -EINVAL;
  1012. dev_err(rdev_to_dev(rdev),
  1013. "Failed to create Shadow QP for QP1");
  1014. goto qp_destroy;
  1015. }
  1016. rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
  1017. &qp->qplib_qp);
  1018. if (!rdev->sqp_ah) {
  1019. bnxt_qplib_destroy_qp(&rdev->qplib_res,
  1020. &rdev->qp1_sqp->qplib_qp);
  1021. rc = -EINVAL;
  1022. dev_err(rdev_to_dev(rdev),
  1023. "Failed to create AH entry for ShadowQP");
  1024. goto qp_destroy;
  1025. }
  1026. } else {
  1027. /* Allocate 128 + 1 more than what's provided */
  1028. entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
  1029. BNXT_QPLIB_RESERVED_QP_WRS + 1);
  1030. qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
  1031. dev_attr->max_qp_wqes +
  1032. BNXT_QPLIB_RESERVED_QP_WRS + 1);
  1033. qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
  1034. /*
  1035. * Reserve one slot for the phantom WQE. The application can
  1036. * post one extra entry in this case, but we allow it to avoid
  1037. * an unexpected queue-full condition.
  1038. */
  1039. qp->qplib_qp.sq.q_full_delta -= 1;
  1040. qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
  1041. qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
  1042. if (udata) {
  1043. rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
  1044. if (rc)
  1045. goto fail;
  1046. } else {
  1047. qp->qplib_qp.dpi = &rdev->dpi_privileged;
  1048. }
  1049. rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
  1050. if (rc) {
  1051. dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
  1052. goto free_umem;
  1053. }
  1054. }
  1055. qp->ib_qp.qp_num = qp->qplib_qp.id;
  1056. spin_lock_init(&qp->sq_lock);
  1057. spin_lock_init(&qp->rq_lock);
  1058. if (udata) {
  1059. struct bnxt_re_qp_resp resp;
  1060. resp.qpid = qp->ib_qp.qp_num;
  1061. resp.rsvd = 0;
  1062. rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
  1063. if (rc) {
  1064. dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
  1065. goto qp_destroy;
  1066. }
  1067. }
  1068. INIT_LIST_HEAD(&qp->list);
  1069. mutex_lock(&rdev->qp_lock);
  1070. list_add_tail(&qp->list, &rdev->qp_list);
  1071. atomic_inc(&rdev->qp_count);
  1072. mutex_unlock(&rdev->qp_lock);
  1073. return &qp->ib_qp;
  1074. qp_destroy:
  1075. bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
  1076. free_umem:
  1077. if (udata) {
  1078. if (qp->rumem)
  1079. ib_umem_release(qp->rumem);
  1080. if (qp->sumem)
  1081. ib_umem_release(qp->sumem);
  1082. }
  1083. fail:
  1084. kfree(qp);
  1085. return ERR_PTR(rc);
  1086. }
  1087. static u8 __from_ib_qp_state(enum ib_qp_state state)
  1088. {
  1089. switch (state) {
  1090. case IB_QPS_RESET:
  1091. return CMDQ_MODIFY_QP_NEW_STATE_RESET;
  1092. case IB_QPS_INIT:
  1093. return CMDQ_MODIFY_QP_NEW_STATE_INIT;
  1094. case IB_QPS_RTR:
  1095. return CMDQ_MODIFY_QP_NEW_STATE_RTR;
  1096. case IB_QPS_RTS:
  1097. return CMDQ_MODIFY_QP_NEW_STATE_RTS;
  1098. case IB_QPS_SQD:
  1099. return CMDQ_MODIFY_QP_NEW_STATE_SQD;
  1100. case IB_QPS_SQE:
  1101. return CMDQ_MODIFY_QP_NEW_STATE_SQE;
  1102. case IB_QPS_ERR:
  1103. default:
  1104. return CMDQ_MODIFY_QP_NEW_STATE_ERR;
  1105. }
  1106. }
  1107. static enum ib_qp_state __to_ib_qp_state(u8 state)
  1108. {
  1109. switch (state) {
  1110. case CMDQ_MODIFY_QP_NEW_STATE_RESET:
  1111. return IB_QPS_RESET;
  1112. case CMDQ_MODIFY_QP_NEW_STATE_INIT:
  1113. return IB_QPS_INIT;
  1114. case CMDQ_MODIFY_QP_NEW_STATE_RTR:
  1115. return IB_QPS_RTR;
  1116. case CMDQ_MODIFY_QP_NEW_STATE_RTS:
  1117. return IB_QPS_RTS;
  1118. case CMDQ_MODIFY_QP_NEW_STATE_SQD:
  1119. return IB_QPS_SQD;
  1120. case CMDQ_MODIFY_QP_NEW_STATE_SQE:
  1121. return IB_QPS_SQE;
  1122. case CMDQ_MODIFY_QP_NEW_STATE_ERR:
  1123. default:
  1124. return IB_QPS_ERR;
  1125. }
  1126. }
  1127. static u32 __from_ib_mtu(enum ib_mtu mtu)
  1128. {
  1129. switch (mtu) {
  1130. case IB_MTU_256:
  1131. return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
  1132. case IB_MTU_512:
  1133. return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
  1134. case IB_MTU_1024:
  1135. return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
  1136. case IB_MTU_2048:
  1137. return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
  1138. case IB_MTU_4096:
  1139. return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
  1140. default:
  1141. return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
  1142. }
  1143. }
  1144. static enum ib_mtu __to_ib_mtu(u32 mtu)
  1145. {
  1146. switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
  1147. case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
  1148. return IB_MTU_256;
  1149. case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
  1150. return IB_MTU_512;
  1151. case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
  1152. return IB_MTU_1024;
  1153. case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
  1154. return IB_MTU_2048;
  1155. case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
  1156. return IB_MTU_4096;
  1157. default:
  1158. return IB_MTU_2048;
  1159. }
  1160. }
  1161. /* Shared Receive Queues */
  1162. int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
  1163. {
  1164. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1165. ib_srq);
  1166. struct bnxt_re_dev *rdev = srq->rdev;
  1167. struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
  1168. struct bnxt_qplib_nq *nq = NULL;
  1169. int rc;
  1170. if (qplib_srq->cq)
  1171. nq = qplib_srq->cq->nq;
  1172. rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
  1173. if (rc) {
  1174. dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
  1175. return rc;
  1176. }
  1177. if (srq->umem)
  1178. ib_umem_release(srq->umem);
  1179. kfree(srq);
  1180. atomic_dec(&rdev->srq_count);
  1181. if (nq)
  1182. nq->budget--;
  1183. return 0;
  1184. }
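/* For a user-space SRQ, pin the ring supplied in the udata request and
 * point the qplib SRQ at the resulting scatterlist and the caller's
 * doorbell page (DPI).
 */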
  1185. static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
  1186. struct bnxt_re_pd *pd,
  1187. struct bnxt_re_srq *srq,
  1188. struct ib_udata *udata)
  1189. {
  1190. struct bnxt_re_srq_req ureq;
  1191. struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
  1192. struct ib_umem *umem;
  1193. int bytes = 0;
  1194. struct ib_ucontext *context = pd->ib_pd.uobject->context;
  1195. struct bnxt_re_ucontext *cntx = container_of(context,
  1196. struct bnxt_re_ucontext,
  1197. ib_uctx);
  1198. if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
  1199. return -EFAULT;
  1200. bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
  1201. bytes = PAGE_ALIGN(bytes);
  1202. umem = ib_umem_get(context, ureq.srqva, bytes,
  1203. IB_ACCESS_LOCAL_WRITE, 1);
  1204. if (IS_ERR(umem))
  1205. return PTR_ERR(umem);
  1206. srq->umem = umem;
  1207. qplib_srq->nmap = umem->nmap;
  1208. qplib_srq->sglist = umem->sg_head.sgl;
  1209. qplib_srq->srq_handle = ureq.srq_handle;
  1210. qplib_srq->dpi = &cntx->dpi;
  1211. return 0;
  1212. }
  1213. struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
  1214. struct ib_srq_init_attr *srq_init_attr,
  1215. struct ib_udata *udata)
  1216. {
  1217. struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
  1218. struct bnxt_re_dev *rdev = pd->rdev;
  1219. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  1220. struct bnxt_re_srq *srq;
  1221. struct bnxt_qplib_nq *nq = NULL;
  1222. int rc, entries;
  1223. if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1224. dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
  1225. rc = -EINVAL;
  1226. goto exit;
  1227. }
  1228. if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
  1229. rc = -ENOTSUPP;
  1230. goto exit;
  1231. }
  1232. srq = kzalloc(sizeof(*srq), GFP_KERNEL);
  1233. if (!srq) {
  1234. rc = -ENOMEM;
  1235. goto exit;
  1236. }
  1237. srq->rdev = rdev;
  1238. srq->qplib_srq.pd = &pd->qplib_pd;
  1239. srq->qplib_srq.dpi = &rdev->dpi_privileged;
1240. /* Allocate 1 more than what's provided so that posting the maximum
1241. * number of entries does not make the queue look empty.
1242. */
  1243. entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
  1244. if (entries > dev_attr->max_srq_wqes + 1)
  1245. entries = dev_attr->max_srq_wqes + 1;
  1246. srq->qplib_srq.max_wqe = entries;
  1247. srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
  1248. srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
  1249. srq->srq_limit = srq_init_attr->attr.srq_limit;
  1250. srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
  1251. nq = &rdev->nq[0];
  1252. if (udata) {
  1253. rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
  1254. if (rc)
  1255. goto fail;
  1256. }
  1257. rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
  1258. if (rc) {
  1259. dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
  1260. goto fail;
  1261. }
  1262. if (udata) {
  1263. struct bnxt_re_srq_resp resp;
  1264. resp.srqid = srq->qplib_srq.id;
  1265. rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
  1266. if (rc) {
  1267. dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
  1268. bnxt_qplib_destroy_srq(&rdev->qplib_res,
  1269. &srq->qplib_srq);
  1270. goto exit;
  1271. }
  1272. }
  1273. if (nq)
  1274. nq->budget++;
  1275. atomic_inc(&rdev->srq_count);
  1276. return &srq->ib_srq;
  1277. fail:
  1278. if (srq->umem)
  1279. ib_umem_release(srq->umem);
  1280. kfree(srq);
  1281. exit:
  1282. return ERR_PTR(rc);
  1283. }
  1284. int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
  1285. enum ib_srq_attr_mask srq_attr_mask,
  1286. struct ib_udata *udata)
  1287. {
  1288. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1289. ib_srq);
  1290. struct bnxt_re_dev *rdev = srq->rdev;
  1291. int rc;
  1292. switch (srq_attr_mask) {
  1293. case IB_SRQ_MAX_WR:
  1294. /* SRQ resize is not supported */
  1295. break;
  1296. case IB_SRQ_LIMIT:
  1297. /* Change the SRQ threshold */
  1298. if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
  1299. return -EINVAL;
  1300. srq->qplib_srq.threshold = srq_attr->srq_limit;
  1301. rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
  1302. if (rc) {
  1303. dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
  1304. return rc;
  1305. }
  1306. /* On success, update the shadow */
  1307. srq->srq_limit = srq_attr->srq_limit;
1308. /* No need to build and send a response back to udata */
  1309. break;
  1310. default:
  1311. dev_err(rdev_to_dev(rdev),
  1312. "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
  1313. return -EINVAL;
  1314. }
  1315. return 0;
  1316. }
  1317. int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
  1318. {
  1319. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1320. ib_srq);
  1321. struct bnxt_re_srq tsrq;
  1322. struct bnxt_re_dev *rdev = srq->rdev;
  1323. int rc;
  1324. /* Get live SRQ attr */
  1325. tsrq.qplib_srq.id = srq->qplib_srq.id;
  1326. rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
  1327. if (rc) {
  1328. dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
  1329. return rc;
  1330. }
  1331. srq_attr->max_wr = srq->qplib_srq.max_wqe;
  1332. srq_attr->max_sge = srq->qplib_srq.max_sge;
  1333. srq_attr->srq_limit = tsrq.qplib_srq.threshold;
  1334. return 0;
  1335. }
  1336. int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
  1337. struct ib_recv_wr **bad_wr)
  1338. {
  1339. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1340. ib_srq);
  1341. struct bnxt_qplib_swqe wqe;
  1342. unsigned long flags;
  1343. int rc = 0;
  1344. spin_lock_irqsave(&srq->lock, flags);
  1345. while (wr) {
  1346. /* Transcribe each ib_recv_wr to qplib_swqe */
  1347. wqe.num_sge = wr->num_sge;
  1348. bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
  1349. wqe.wr_id = wr->wr_id;
  1350. wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
  1351. rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
  1352. if (rc) {
  1353. *bad_wr = wr;
  1354. break;
  1355. }
  1356. wr = wr->next;
  1357. }
  1358. spin_unlock_irqrestore(&srq->lock, flags);
  1359. return rc;
  1360. }
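/* Propagate QP1 attribute changes to the GSI shadow QP (rdev->qp1_sqp):
 * state, pkey index and SQ PSN are mirrored, and a fixed local QKEY is
 * programmed.
 */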
  1361. static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
  1362. struct bnxt_re_qp *qp1_qp,
  1363. int qp_attr_mask)
  1364. {
  1365. struct bnxt_re_qp *qp = rdev->qp1_sqp;
  1366. int rc = 0;
  1367. if (qp_attr_mask & IB_QP_STATE) {
  1368. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
  1369. qp->qplib_qp.state = qp1_qp->qplib_qp.state;
  1370. }
  1371. if (qp_attr_mask & IB_QP_PKEY_INDEX) {
  1372. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
  1373. qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
  1374. }
  1375. if (qp_attr_mask & IB_QP_QKEY) {
  1376. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
  1377. /* Using a Random QKEY */
  1378. qp->qplib_qp.qkey = 0x81818181;
  1379. }
  1380. if (qp_attr_mask & IB_QP_SQ_PSN) {
  1381. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
  1382. qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
  1383. }
  1384. rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
  1385. if (rc)
  1386. dev_err(rdev_to_dev(rdev),
  1387. "Failed to modify Shadow QP for QP1");
  1388. return rc;
  1389. }
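/* Convert the requested IB QP attributes into qplib modify flags and
 * fields, push them to the firmware, and keep the shadow QP in sync for
 * GSI QPs.
 */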
  1390. int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
  1391. int qp_attr_mask, struct ib_udata *udata)
  1392. {
  1393. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  1394. struct bnxt_re_dev *rdev = qp->rdev;
  1395. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  1396. enum ib_qp_state curr_qp_state, new_qp_state;
  1397. int rc, entries;
  1398. unsigned int flags;
  1399. u8 nw_type;
  1400. qp->qplib_qp.modify_flags = 0;
  1401. if (qp_attr_mask & IB_QP_STATE) {
  1402. curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
  1403. new_qp_state = qp_attr->qp_state;
  1404. if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
  1405. ib_qp->qp_type, qp_attr_mask,
  1406. IB_LINK_LAYER_ETHERNET)) {
  1407. dev_err(rdev_to_dev(rdev),
  1408. "Invalid attribute mask: %#x specified ",
  1409. qp_attr_mask);
  1410. dev_err(rdev_to_dev(rdev),
  1411. "for qpn: %#x type: %#x",
  1412. ib_qp->qp_num, ib_qp->qp_type);
  1413. dev_err(rdev_to_dev(rdev),
  1414. "curr_qp_state=0x%x, new_qp_state=0x%x\n",
  1415. curr_qp_state, new_qp_state);
  1416. return -EINVAL;
  1417. }
  1418. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
  1419. qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
  1420. if (!qp->sumem &&
  1421. qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
  1422. dev_dbg(rdev_to_dev(rdev),
  1423. "Move QP = %p to flush list\n",
  1424. qp);
  1425. flags = bnxt_re_lock_cqs(qp);
  1426. bnxt_qplib_add_flush_qp(&qp->qplib_qp);
  1427. bnxt_re_unlock_cqs(qp, flags);
  1428. }
  1429. if (!qp->sumem &&
  1430. qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
  1431. dev_dbg(rdev_to_dev(rdev),
  1432. "Move QP = %p out of flush list\n",
  1433. qp);
  1434. flags = bnxt_re_lock_cqs(qp);
  1435. bnxt_qplib_clean_qp(&qp->qplib_qp);
  1436. bnxt_re_unlock_cqs(qp, flags);
  1437. }
  1438. }
  1439. if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
  1440. qp->qplib_qp.modify_flags |=
  1441. CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
  1442. qp->qplib_qp.en_sqd_async_notify = true;
  1443. }
  1444. if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
  1445. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
  1446. qp->qplib_qp.access =
  1447. __from_ib_access_flags(qp_attr->qp_access_flags);
  1448. /* LOCAL_WRITE access must be set to allow RC receive */
  1449. qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
  1450. }
  1451. if (qp_attr_mask & IB_QP_PKEY_INDEX) {
  1452. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
  1453. qp->qplib_qp.pkey_index = qp_attr->pkey_index;
  1454. }
  1455. if (qp_attr_mask & IB_QP_QKEY) {
  1456. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
  1457. qp->qplib_qp.qkey = qp_attr->qkey;
  1458. }
  1459. if (qp_attr_mask & IB_QP_AV) {
  1460. const struct ib_global_route *grh =
  1461. rdma_ah_read_grh(&qp_attr->ah_attr);
  1462. const struct ib_gid_attr *sgid_attr;
  1463. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
  1464. CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
  1465. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
  1466. CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
  1467. CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
  1468. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
  1469. CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
  1470. memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
  1471. sizeof(qp->qplib_qp.ah.dgid.data));
  1472. qp->qplib_qp.ah.flow_label = grh->flow_label;
1473. /* If RoCE V2 is enabled, the stack will have two entries for
1474. * each GID entry. Avoid this duplicate entry in HW by dividing
1475. * the GID index by 2 for RoCE V2.
1476. */
  1477. qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
  1478. qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
  1479. qp->qplib_qp.ah.hop_limit = grh->hop_limit;
  1480. qp->qplib_qp.ah.traffic_class = grh->traffic_class;
  1481. qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
  1482. ether_addr_copy(qp->qplib_qp.ah.dmac,
  1483. qp_attr->ah_attr.roce.dmac);
  1484. sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
  1485. memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
  1486. ETH_ALEN);
  1487. nw_type = rdma_gid_attr_network_type(sgid_attr);
  1488. switch (nw_type) {
  1489. case RDMA_NETWORK_IPV4:
  1490. qp->qplib_qp.nw_type =
  1491. CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
  1492. break;
  1493. case RDMA_NETWORK_IPV6:
  1494. qp->qplib_qp.nw_type =
  1495. CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
  1496. break;
  1497. default:
  1498. qp->qplib_qp.nw_type =
  1499. CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
  1500. break;
  1501. }
  1502. }
  1503. if (qp_attr_mask & IB_QP_PATH_MTU) {
  1504. qp->qplib_qp.modify_flags |=
  1505. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
  1506. qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
  1507. qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
  1508. } else if (qp_attr->qp_state == IB_QPS_RTR) {
  1509. qp->qplib_qp.modify_flags |=
  1510. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
  1511. qp->qplib_qp.path_mtu =
  1512. __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
  1513. qp->qplib_qp.mtu =
  1514. ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
  1515. }
  1516. if (qp_attr_mask & IB_QP_TIMEOUT) {
  1517. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
  1518. qp->qplib_qp.timeout = qp_attr->timeout;
  1519. }
  1520. if (qp_attr_mask & IB_QP_RETRY_CNT) {
  1521. qp->qplib_qp.modify_flags |=
  1522. CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
  1523. qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
  1524. }
  1525. if (qp_attr_mask & IB_QP_RNR_RETRY) {
  1526. qp->qplib_qp.modify_flags |=
  1527. CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
  1528. qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
  1529. }
  1530. if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
  1531. qp->qplib_qp.modify_flags |=
  1532. CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
  1533. qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
  1534. }
  1535. if (qp_attr_mask & IB_QP_RQ_PSN) {
  1536. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
  1537. qp->qplib_qp.rq.psn = qp_attr->rq_psn;
  1538. }
  1539. if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
  1540. qp->qplib_qp.modify_flags |=
  1541. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
  1542. /* Cap the max_rd_atomic to device max */
  1543. qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
  1544. dev_attr->max_qp_rd_atom);
  1545. }
  1546. if (qp_attr_mask & IB_QP_SQ_PSN) {
  1547. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
  1548. qp->qplib_qp.sq.psn = qp_attr->sq_psn;
  1549. }
  1550. if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
  1551. if (qp_attr->max_dest_rd_atomic >
  1552. dev_attr->max_qp_init_rd_atom) {
  1553. dev_err(rdev_to_dev(rdev),
  1554. "max_dest_rd_atomic requested%d is > dev_max%d",
  1555. qp_attr->max_dest_rd_atomic,
  1556. dev_attr->max_qp_init_rd_atom);
  1557. return -EINVAL;
  1558. }
  1559. qp->qplib_qp.modify_flags |=
  1560. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
  1561. qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
  1562. }
  1563. if (qp_attr_mask & IB_QP_CAP) {
  1564. qp->qplib_qp.modify_flags |=
  1565. CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
  1566. CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
  1567. CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
  1568. CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
  1569. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
  1570. if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
  1571. (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
  1572. (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
  1573. (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
  1574. (qp_attr->cap.max_inline_data >=
  1575. dev_attr->max_inline_data)) {
  1576. dev_err(rdev_to_dev(rdev),
  1577. "Create QP failed - max exceeded");
  1578. return -EINVAL;
  1579. }
  1580. entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
  1581. qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
  1582. dev_attr->max_qp_wqes + 1);
  1583. qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
  1584. qp_attr->cap.max_send_wr;
1585. /*
1586. * Reserve one slot for the Phantom WQE. Some applications can
1587. * post one extra entry in this case; allowing this avoids an
1588. * unexpected queue-full condition.
1589. */
  1590. qp->qplib_qp.sq.q_full_delta -= 1;
  1591. qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
  1592. if (qp->qplib_qp.rq.max_wqe) {
  1593. entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
  1594. qp->qplib_qp.rq.max_wqe =
  1595. min_t(u32, entries, dev_attr->max_qp_wqes + 1);
  1596. qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
  1597. qp_attr->cap.max_recv_wr;
  1598. qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
  1599. } else {
  1600. /* SRQ was used prior, just ignore the RQ caps */
  1601. }
  1602. }
  1603. if (qp_attr_mask & IB_QP_DEST_QPN) {
  1604. qp->qplib_qp.modify_flags |=
  1605. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
  1606. qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
  1607. }
  1608. rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
  1609. if (rc) {
  1610. dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
  1611. return rc;
  1612. }
  1613. if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
  1614. rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
  1615. return rc;
  1616. }
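/* Query the live QP state from firmware into a scratch qplib_qp and
 * translate it back into IB verbs attributes for the caller.
 */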
  1617. int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
  1618. int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
  1619. {
  1620. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  1621. struct bnxt_re_dev *rdev = qp->rdev;
  1622. struct bnxt_qplib_qp *qplib_qp;
  1623. int rc;
  1624. qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
  1625. if (!qplib_qp)
  1626. return -ENOMEM;
  1627. qplib_qp->id = qp->qplib_qp.id;
  1628. qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
  1629. rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
  1630. if (rc) {
  1631. dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
  1632. goto out;
  1633. }
  1634. qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
  1635. qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
  1636. qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
  1637. qp_attr->pkey_index = qplib_qp->pkey_index;
  1638. qp_attr->qkey = qplib_qp->qkey;
  1639. qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
  1640. rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
  1641. qplib_qp->ah.host_sgid_index,
  1642. qplib_qp->ah.hop_limit,
  1643. qplib_qp->ah.traffic_class);
  1644. rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
  1645. rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
  1646. ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
  1647. qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
  1648. qp_attr->timeout = qplib_qp->timeout;
  1649. qp_attr->retry_cnt = qplib_qp->retry_cnt;
  1650. qp_attr->rnr_retry = qplib_qp->rnr_retry;
  1651. qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
  1652. qp_attr->rq_psn = qplib_qp->rq.psn;
  1653. qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
  1654. qp_attr->sq_psn = qplib_qp->sq.psn;
  1655. qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
  1656. qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
  1657. IB_SIGNAL_REQ_WR;
  1658. qp_attr->dest_qp_num = qplib_qp->dest_qpn;
  1659. qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
  1660. qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
  1661. qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
  1662. qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
  1663. qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
  1664. qp_init_attr->cap = qp_attr->cap;
  1665. out:
  1666. kfree(qplib_qp);
  1667. return rc;
  1668. }
1669. /* Routine for sending QP1 packets for RoCE V1 and V2
1670. */
  1671. static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
  1672. struct ib_send_wr *wr,
  1673. struct bnxt_qplib_swqe *wqe,
  1674. int payload_size)
  1675. {
  1676. struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
  1677. ib_ah);
  1678. struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
  1679. const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
  1680. struct bnxt_qplib_sge sge;
  1681. u8 nw_type;
  1682. u16 ether_type;
  1683. union ib_gid dgid;
  1684. bool is_eth = false;
  1685. bool is_vlan = false;
  1686. bool is_grh = false;
  1687. bool is_udp = false;
  1688. u8 ip_version = 0;
  1689. u16 vlan_id = 0xFFFF;
  1690. void *buf;
  1691. int i, rc = 0;
  1692. memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
  1693. if (is_vlan_dev(sgid_attr->ndev))
  1694. vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
  1695. /* Get network header type for this GID */
  1696. nw_type = rdma_gid_attr_network_type(sgid_attr);
  1697. switch (nw_type) {
  1698. case RDMA_NETWORK_IPV4:
  1699. nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
  1700. break;
  1701. case RDMA_NETWORK_IPV6:
  1702. nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
  1703. break;
  1704. default:
  1705. nw_type = BNXT_RE_ROCE_V1_PACKET;
  1706. break;
  1707. }
  1708. memcpy(&dgid.raw, &qplib_ah->dgid, 16);
  1709. is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
  1710. if (is_udp) {
  1711. if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
  1712. ip_version = 4;
  1713. ether_type = ETH_P_IP;
  1714. } else {
  1715. ip_version = 6;
  1716. ether_type = ETH_P_IPV6;
  1717. }
  1718. is_grh = false;
  1719. } else {
  1720. ether_type = ETH_P_IBOE;
  1721. is_grh = true;
  1722. }
  1723. is_eth = true;
  1724. is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
  1725. ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
  1726. ip_version, is_udp, 0, &qp->qp1_hdr);
  1727. /* ETH */
  1728. ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
  1729. ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
  1730. /* For vlan, check the sgid for vlan existence */
  1731. if (!is_vlan) {
  1732. qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
  1733. } else {
  1734. qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
  1735. qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
  1736. }
  1737. if (is_grh || (ip_version == 6)) {
  1738. memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
  1739. sizeof(sgid_attr->gid));
  1740. memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
  1741. sizeof(sgid_attr->gid));
  1742. qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
  1743. }
  1744. if (ip_version == 4) {
  1745. qp->qp1_hdr.ip4.tos = 0;
  1746. qp->qp1_hdr.ip4.id = 0;
  1747. qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
  1748. qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
  1749. memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
  1750. memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
  1751. qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
  1752. }
  1753. if (is_udp) {
  1754. qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
  1755. qp->qp1_hdr.udp.sport = htons(0x8CD1);
  1756. qp->qp1_hdr.udp.csum = 0;
  1757. }
  1758. /* BTH */
  1759. if (wr->opcode == IB_WR_SEND_WITH_IMM) {
  1760. qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
  1761. qp->qp1_hdr.immediate_present = 1;
  1762. } else {
  1763. qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
  1764. }
  1765. if (wr->send_flags & IB_SEND_SOLICITED)
  1766. qp->qp1_hdr.bth.solicited_event = 1;
  1767. /* pad_count */
  1768. qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
  1769. /* P_key for QP1 is for all members */
  1770. qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
  1771. qp->qp1_hdr.bth.destination_qpn = IB_QP1;
  1772. qp->qp1_hdr.bth.ack_req = 0;
  1773. qp->send_psn++;
  1774. qp->send_psn &= BTH_PSN_MASK;
  1775. qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
  1776. /* DETH */
1777. /* Use the privileged Q_Key for QP1 */
  1778. qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
  1779. qp->qp1_hdr.deth.source_qpn = IB_QP1;
  1780. /* Pack the QP1 to the transmit buffer */
  1781. buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
  1782. if (buf) {
  1783. ib_ud_header_pack(&qp->qp1_hdr, buf);
  1784. for (i = wqe->num_sge; i; i--) {
  1785. wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
  1786. wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
  1787. wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
  1788. }
  1789. /*
  1790. * Max Header buf size for IPV6 RoCE V2 is 86,
  1791. * which is same as the QP1 SQ header buffer.
  1792. * Header buf size for IPV4 RoCE V2 can be 66.
  1793. * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
  1794. * Subtract 20 bytes from QP1 SQ header buf size
  1795. */
  1796. if (is_udp && ip_version == 4)
  1797. sge.size -= 20;
  1798. /*
  1799. * Max Header buf size for RoCE V1 is 78.
  1800. * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
  1801. * Subtract 8 bytes from QP1 SQ header buf size
  1802. */
  1803. if (!is_udp)
  1804. sge.size -= 8;
  1805. /* Subtract 4 bytes for non vlan packets */
  1806. if (!is_vlan)
  1807. sge.size -= 4;
  1808. wqe->sg_list[0].addr = sge.addr;
  1809. wqe->sg_list[0].lkey = sge.lkey;
  1810. wqe->sg_list[0].size = sge.size;
  1811. wqe->num_sge++;
  1812. } else {
  1813. dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
  1814. rc = -ENOMEM;
  1815. }
  1816. return rc;
  1817. }
1818. /* The MAD layer only provides a recv SGE sized for the ib_grh plus
1819. * the MAD datagram; no Ethernet headers, Ethertype, BTH, DETH,
1820. * nor RoCE iCRC are included. The Cu+ solution must provide a buffer
1821. * for the entire receive packet (334 bytes) with no VLAN and then
1822. * copy the GRH and the MAD datagram out to the provided SGE.
1823. */
  1824. static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
  1825. struct ib_recv_wr *wr,
  1826. struct bnxt_qplib_swqe *wqe,
  1827. int payload_size)
  1828. {
  1829. struct bnxt_qplib_sge ref, sge;
  1830. u32 rq_prod_index;
  1831. struct bnxt_re_sqp_entries *sqp_entry;
  1832. rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
  1833. if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
  1834. return -ENOMEM;
  1835. /* Create 1 SGE to receive the entire
  1836. * ethernet packet
  1837. */
  1838. /* Save the reference from ULP */
  1839. ref.addr = wqe->sg_list[0].addr;
  1840. ref.lkey = wqe->sg_list[0].lkey;
  1841. ref.size = wqe->sg_list[0].size;
  1842. sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
  1843. /* SGE 1 */
  1844. wqe->sg_list[0].addr = sge.addr;
  1845. wqe->sg_list[0].lkey = sge.lkey;
  1846. wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
  1847. sge.size -= wqe->sg_list[0].size;
  1848. sqp_entry->sge.addr = ref.addr;
  1849. sqp_entry->sge.lkey = ref.lkey;
  1850. sqp_entry->sge.size = ref.size;
  1851. /* Store the wrid for reporting completion */
  1852. sqp_entry->wrid = wqe->wr_id;
  1853. /* change the wqe->wrid to table index */
  1854. wqe->wr_id = rq_prod_index;
  1855. return 0;
  1856. }
  1857. static int is_ud_qp(struct bnxt_re_qp *qp)
  1858. {
  1859. return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
  1860. }
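/* Fill a qplib send WQE from an IB send work request; for UD QPs the
 * destination QP number, remote QKEY and AH id are taken from the
 * ud_wr fields.
 */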
  1861. static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
  1862. struct ib_send_wr *wr,
  1863. struct bnxt_qplib_swqe *wqe)
  1864. {
  1865. struct bnxt_re_ah *ah = NULL;
  1866. if (is_ud_qp(qp)) {
  1867. ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
  1868. wqe->send.q_key = ud_wr(wr)->remote_qkey;
  1869. wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
  1870. wqe->send.avid = ah->qplib_ah.id;
  1871. }
  1872. switch (wr->opcode) {
  1873. case IB_WR_SEND:
  1874. wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
  1875. break;
  1876. case IB_WR_SEND_WITH_IMM:
  1877. wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
  1878. wqe->send.imm_data = wr->ex.imm_data;
  1879. break;
  1880. case IB_WR_SEND_WITH_INV:
  1881. wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
  1882. wqe->send.inv_key = wr->ex.invalidate_rkey;
  1883. break;
  1884. default:
  1885. return -EINVAL;
  1886. }
  1887. if (wr->send_flags & IB_SEND_SIGNALED)
  1888. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1889. if (wr->send_flags & IB_SEND_FENCE)
  1890. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1891. if (wr->send_flags & IB_SEND_SOLICITED)
  1892. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1893. if (wr->send_flags & IB_SEND_INLINE)
  1894. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
  1895. return 0;
  1896. }
  1897. static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
  1898. struct bnxt_qplib_swqe *wqe)
  1899. {
  1900. switch (wr->opcode) {
  1901. case IB_WR_RDMA_WRITE:
  1902. wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
  1903. break;
  1904. case IB_WR_RDMA_WRITE_WITH_IMM:
  1905. wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
  1906. wqe->rdma.imm_data = wr->ex.imm_data;
  1907. break;
  1908. case IB_WR_RDMA_READ:
  1909. wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
  1910. wqe->rdma.inv_key = wr->ex.invalidate_rkey;
  1911. break;
  1912. default:
  1913. return -EINVAL;
  1914. }
  1915. wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
  1916. wqe->rdma.r_key = rdma_wr(wr)->rkey;
  1917. if (wr->send_flags & IB_SEND_SIGNALED)
  1918. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1919. if (wr->send_flags & IB_SEND_FENCE)
  1920. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1921. if (wr->send_flags & IB_SEND_SOLICITED)
  1922. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1923. if (wr->send_flags & IB_SEND_INLINE)
  1924. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
  1925. return 0;
  1926. }
  1927. static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
  1928. struct bnxt_qplib_swqe *wqe)
  1929. {
  1930. switch (wr->opcode) {
  1931. case IB_WR_ATOMIC_CMP_AND_SWP:
  1932. wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
  1933. wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
  1934. wqe->atomic.swap_data = atomic_wr(wr)->swap;
  1935. break;
  1936. case IB_WR_ATOMIC_FETCH_AND_ADD:
  1937. wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
  1938. wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
  1939. break;
  1940. default:
  1941. return -EINVAL;
  1942. }
  1943. wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
  1944. wqe->atomic.r_key = atomic_wr(wr)->rkey;
  1945. if (wr->send_flags & IB_SEND_SIGNALED)
  1946. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1947. if (wr->send_flags & IB_SEND_FENCE)
  1948. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1949. if (wr->send_flags & IB_SEND_SOLICITED)
  1950. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1951. return 0;
  1952. }
  1953. static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
  1954. struct bnxt_qplib_swqe *wqe)
  1955. {
  1956. wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
  1957. wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
  1958. /* Need unconditional fence for local invalidate
  1959. * opcode to work as expected.
  1960. */
  1961. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1962. if (wr->send_flags & IB_SEND_SIGNALED)
  1963. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1964. if (wr->send_flags & IB_SEND_SOLICITED)
  1965. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1966. return 0;
  1967. }
  1968. static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
  1969. struct bnxt_qplib_swqe *wqe)
  1970. {
  1971. struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
  1972. struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
  1973. int access = wr->access;
  1974. wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
  1975. wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
  1976. wqe->frmr.page_list = mr->pages;
  1977. wqe->frmr.page_list_len = mr->npages;
  1978. wqe->frmr.levels = qplib_frpl->hwq.level + 1;
  1979. wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
  1980. /* Need unconditional fence for reg_mr
  1981. * opcode to function as expected.
  1982. */
  1983. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1984. if (wr->wr.send_flags & IB_SEND_SIGNALED)
  1985. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1986. if (access & IB_ACCESS_LOCAL_WRITE)
  1987. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
  1988. if (access & IB_ACCESS_REMOTE_READ)
  1989. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
  1990. if (access & IB_ACCESS_REMOTE_WRITE)
  1991. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
  1992. if (access & IB_ACCESS_REMOTE_ATOMIC)
  1993. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
  1994. if (access & IB_ACCESS_MW_BIND)
  1995. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
  1996. wqe->frmr.l_key = wr->key;
  1997. wqe->frmr.length = wr->mr->length;
  1998. wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
  1999. wqe->frmr.va = wr->mr->iova;
  2000. return 0;
  2001. }
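/* Copy inline payload from the WR's SG list into the WQE's inline data
 * area, rejecting anything beyond BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
 * returns the total inline length on success.
 */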
  2002. static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
  2003. struct ib_send_wr *wr,
  2004. struct bnxt_qplib_swqe *wqe)
  2005. {
  2006. /* Copy the inline data to the data field */
  2007. u8 *in_data;
  2008. u32 i, sge_len;
  2009. void *sge_addr;
  2010. in_data = wqe->inline_data;
  2011. for (i = 0; i < wr->num_sge; i++) {
  2012. sge_addr = (void *)(unsigned long)
  2013. wr->sg_list[i].addr;
  2014. sge_len = wr->sg_list[i].length;
  2015. if ((sge_len + wqe->inline_len) >
  2016. BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
  2017. dev_err(rdev_to_dev(rdev),
  2018. "Inline data size requested > supported value");
  2019. return -EINVAL;
  2020. }
  2021. sge_len = wr->sg_list[i].length;
  2022. memcpy(in_data, sge_addr, sge_len);
  2023. in_data += wr->sg_list[i].length;
  2024. wqe->inline_len += wr->sg_list[i].length;
  2025. }
  2026. return wqe->inline_len;
  2027. }
  2028. static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
  2029. struct ib_send_wr *wr,
  2030. struct bnxt_qplib_swqe *wqe)
  2031. {
  2032. int payload_sz = 0;
  2033. if (wr->send_flags & IB_SEND_INLINE)
  2034. payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
  2035. else
  2036. payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
  2037. wqe->num_sge);
  2038. return payload_sz;
  2039. }
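/* UD/GSI/raw-Ethertype QP HW stall workaround: once wqe_cnt reaches
 * BNXT_RE_UD_QP_HW_STALL, issue a modify-QP back to RTS and reset the
 * WQE count.
 */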
  2040. static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
  2041. {
  2042. if ((qp->ib_qp.qp_type == IB_QPT_UD ||
  2043. qp->ib_qp.qp_type == IB_QPT_GSI ||
  2044. qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
  2045. qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
  2046. int qp_attr_mask;
  2047. struct ib_qp_attr qp_attr;
  2048. qp_attr_mask = IB_QP_STATE;
  2049. qp_attr.qp_state = IB_QPS_RTS;
  2050. bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
  2051. qp->qplib_qp.wqe_cnt = 0;
  2052. }
  2053. }
  2054. static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
  2055. struct bnxt_re_qp *qp,
  2056. struct ib_send_wr *wr)
  2057. {
  2058. struct bnxt_qplib_swqe wqe;
  2059. int rc = 0, payload_sz = 0;
  2060. unsigned long flags;
  2061. spin_lock_irqsave(&qp->sq_lock, flags);
  2062. memset(&wqe, 0, sizeof(wqe));
  2063. while (wr) {
  2064. /* House keeping */
  2065. memset(&wqe, 0, sizeof(wqe));
  2066. /* Common */
  2067. wqe.num_sge = wr->num_sge;
  2068. if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
  2069. dev_err(rdev_to_dev(rdev),
  2070. "Limit exceeded for Send SGEs");
  2071. rc = -EINVAL;
  2072. goto bad;
  2073. }
  2074. payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
  2075. if (payload_sz < 0) {
  2076. rc = -EINVAL;
  2077. goto bad;
  2078. }
  2079. wqe.wr_id = wr->wr_id;
  2080. wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
  2081. rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
  2082. if (!rc)
  2083. rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
  2084. bad:
  2085. if (rc) {
  2086. dev_err(rdev_to_dev(rdev),
  2087. "Post send failed opcode = %#x rc = %d",
  2088. wr->opcode, rc);
  2089. break;
  2090. }
  2091. wr = wr->next;
  2092. }
  2093. bnxt_qplib_post_send_db(&qp->qplib_qp);
  2094. bnxt_ud_qp_hw_stall_workaround(qp);
  2095. spin_unlock_irqrestore(&qp->sq_lock, flags);
  2096. return rc;
  2097. }
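/* Post a chain of send work requests: each ib_send_wr is translated to
 * a qplib SWQE based on its opcode, GSI sends additionally get a QP1
 * header built in software, and the SQ doorbell is rung once at the end.
 */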
  2098. int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
  2099. struct ib_send_wr **bad_wr)
  2100. {
  2101. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  2102. struct bnxt_qplib_swqe wqe;
  2103. int rc = 0, payload_sz = 0;
  2104. unsigned long flags;
  2105. spin_lock_irqsave(&qp->sq_lock, flags);
  2106. while (wr) {
  2107. /* House keeping */
  2108. memset(&wqe, 0, sizeof(wqe));
  2109. /* Common */
  2110. wqe.num_sge = wr->num_sge;
  2111. if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
  2112. dev_err(rdev_to_dev(qp->rdev),
  2113. "Limit exceeded for Send SGEs");
  2114. rc = -EINVAL;
  2115. goto bad;
  2116. }
  2117. payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
  2118. if (payload_sz < 0) {
  2119. rc = -EINVAL;
  2120. goto bad;
  2121. }
  2122. wqe.wr_id = wr->wr_id;
  2123. switch (wr->opcode) {
  2124. case IB_WR_SEND:
  2125. case IB_WR_SEND_WITH_IMM:
  2126. if (ib_qp->qp_type == IB_QPT_GSI) {
  2127. rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
  2128. payload_sz);
  2129. if (rc)
  2130. goto bad;
  2131. wqe.rawqp1.lflags |=
  2132. SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
  2133. }
  2134. switch (wr->send_flags) {
  2135. case IB_SEND_IP_CSUM:
  2136. wqe.rawqp1.lflags |=
  2137. SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
  2138. break;
  2139. default:
  2140. break;
  2141. }
  2142. /* Fall thru to build the wqe */
  2143. case IB_WR_SEND_WITH_INV:
  2144. rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
  2145. break;
  2146. case IB_WR_RDMA_WRITE:
  2147. case IB_WR_RDMA_WRITE_WITH_IMM:
  2148. case IB_WR_RDMA_READ:
  2149. rc = bnxt_re_build_rdma_wqe(wr, &wqe);
  2150. break;
  2151. case IB_WR_ATOMIC_CMP_AND_SWP:
  2152. case IB_WR_ATOMIC_FETCH_AND_ADD:
  2153. rc = bnxt_re_build_atomic_wqe(wr, &wqe);
  2154. break;
  2155. case IB_WR_RDMA_READ_WITH_INV:
  2156. dev_err(rdev_to_dev(qp->rdev),
  2157. "RDMA Read with Invalidate is not supported");
  2158. rc = -EINVAL;
  2159. goto bad;
  2160. case IB_WR_LOCAL_INV:
  2161. rc = bnxt_re_build_inv_wqe(wr, &wqe);
  2162. break;
  2163. case IB_WR_REG_MR:
  2164. rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
  2165. break;
  2166. default:
  2167. /* Unsupported WRs */
  2168. dev_err(rdev_to_dev(qp->rdev),
  2169. "WR (%#x) is not supported", wr->opcode);
  2170. rc = -EINVAL;
  2171. goto bad;
  2172. }
  2173. if (!rc)
  2174. rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
  2175. bad:
  2176. if (rc) {
  2177. dev_err(rdev_to_dev(qp->rdev),
  2178. "post_send failed op:%#x qps = %#x rc = %d\n",
  2179. wr->opcode, qp->qplib_qp.state, rc);
  2180. *bad_wr = wr;
  2181. break;
  2182. }
  2183. wr = wr->next;
  2184. }
  2185. bnxt_qplib_post_send_db(&qp->qplib_qp);
  2186. bnxt_ud_qp_hw_stall_workaround(qp);
  2187. spin_unlock_irqrestore(&qp->sq_lock, flags);
  2188. return rc;
  2189. }
  2190. static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
  2191. struct bnxt_re_qp *qp,
  2192. struct ib_recv_wr *wr)
  2193. {
  2194. struct bnxt_qplib_swqe wqe;
  2195. int rc = 0;
  2196. memset(&wqe, 0, sizeof(wqe));
  2197. while (wr) {
  2198. /* House keeping */
  2199. memset(&wqe, 0, sizeof(wqe));
  2200. /* Common */
  2201. wqe.num_sge = wr->num_sge;
  2202. if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
  2203. dev_err(rdev_to_dev(rdev),
  2204. "Limit exceeded for Receive SGEs");
  2205. rc = -EINVAL;
  2206. break;
  2207. }
  2208. bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
  2209. wqe.wr_id = wr->wr_id;
  2210. wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
  2211. rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
  2212. if (rc)
  2213. break;
  2214. wr = wr->next;
  2215. }
  2216. if (!rc)
  2217. bnxt_qplib_post_recv_db(&qp->qplib_qp);
  2218. return rc;
  2219. }
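/* Post receive work requests; the RQ doorbell is rung after every
 * BNXT_RE_RQ_WQE_THRESHOLD entries and once more for any remainder.
 */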
  2220. int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
  2221. struct ib_recv_wr **bad_wr)
  2222. {
  2223. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  2224. struct bnxt_qplib_swqe wqe;
  2225. int rc = 0, payload_sz = 0;
  2226. unsigned long flags;
  2227. u32 count = 0;
  2228. spin_lock_irqsave(&qp->rq_lock, flags);
  2229. while (wr) {
  2230. /* House keeping */
  2231. memset(&wqe, 0, sizeof(wqe));
  2232. /* Common */
  2233. wqe.num_sge = wr->num_sge;
  2234. if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
  2235. dev_err(rdev_to_dev(qp->rdev),
  2236. "Limit exceeded for Receive SGEs");
  2237. rc = -EINVAL;
  2238. *bad_wr = wr;
  2239. break;
  2240. }
  2241. payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
  2242. wr->num_sge);
  2243. wqe.wr_id = wr->wr_id;
  2244. wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
  2245. if (ib_qp->qp_type == IB_QPT_GSI)
  2246. rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
  2247. payload_sz);
  2248. if (!rc)
  2249. rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
  2250. if (rc) {
  2251. *bad_wr = wr;
  2252. break;
  2253. }
2254. /* Ring the DB if the number of RQEs posted reaches a threshold */
  2255. if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
  2256. bnxt_qplib_post_recv_db(&qp->qplib_qp);
  2257. count = 0;
  2258. }
  2259. wr = wr->next;
  2260. }
  2261. if (count)
  2262. bnxt_qplib_post_recv_db(&qp->qplib_qp);
  2263. spin_unlock_irqrestore(&qp->rq_lock, flags);
  2264. return rc;
  2265. }
  2266. /* Completion Queues */
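/* Destroy the HW CQ, release user memory if present and give back the
 * NQ budget taken at create time.
 */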
  2267. int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
  2268. {
  2269. int rc;
  2270. struct bnxt_re_cq *cq;
  2271. struct bnxt_qplib_nq *nq;
  2272. struct bnxt_re_dev *rdev;
  2273. cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
  2274. rdev = cq->rdev;
  2275. nq = cq->qplib_cq.nq;
  2276. rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
  2277. if (rc) {
  2278. dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
  2279. return rc;
  2280. }
  2281. if (!IS_ERR_OR_NULL(cq->umem))
  2282. ib_umem_release(cq->umem);
  2283. atomic_dec(&rdev->cq_count);
  2284. nq->budget--;
  2285. kfree(cq->cql);
  2286. kfree(cq);
  2287. return 0;
  2288. }
  2289. struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
  2290. const struct ib_cq_init_attr *attr,
  2291. struct ib_ucontext *context,
  2292. struct ib_udata *udata)
  2293. {
  2294. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  2295. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  2296. struct bnxt_re_cq *cq = NULL;
  2297. int rc, entries;
  2298. int cqe = attr->cqe;
  2299. struct bnxt_qplib_nq *nq = NULL;
  2300. unsigned int nq_alloc_cnt;
  2301. /* Validate CQ fields */
  2302. if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2303. dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
  2304. return ERR_PTR(-EINVAL);
  2305. }
  2306. cq = kzalloc(sizeof(*cq), GFP_KERNEL);
  2307. if (!cq)
  2308. return ERR_PTR(-ENOMEM);
  2309. cq->rdev = rdev;
  2310. cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
  2311. entries = roundup_pow_of_two(cqe + 1);
  2312. if (entries > dev_attr->max_cq_wqes + 1)
  2313. entries = dev_attr->max_cq_wqes + 1;
  2314. if (context) {
  2315. struct bnxt_re_cq_req req;
  2316. struct bnxt_re_ucontext *uctx = container_of
  2317. (context,
  2318. struct bnxt_re_ucontext,
  2319. ib_uctx);
  2320. if (ib_copy_from_udata(&req, udata, sizeof(req))) {
  2321. rc = -EFAULT;
  2322. goto fail;
  2323. }
  2324. cq->umem = ib_umem_get(context, req.cq_va,
  2325. entries * sizeof(struct cq_base),
  2326. IB_ACCESS_LOCAL_WRITE, 1);
  2327. if (IS_ERR(cq->umem)) {
  2328. rc = PTR_ERR(cq->umem);
  2329. goto fail;
  2330. }
  2331. cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
  2332. cq->qplib_cq.nmap = cq->umem->nmap;
  2333. cq->qplib_cq.dpi = &uctx->dpi;
  2334. } else {
  2335. cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
  2336. cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
  2337. GFP_KERNEL);
  2338. if (!cq->cql) {
  2339. rc = -ENOMEM;
  2340. goto fail;
  2341. }
  2342. cq->qplib_cq.dpi = &rdev->dpi_privileged;
  2343. cq->qplib_cq.sghead = NULL;
  2344. cq->qplib_cq.nmap = 0;
  2345. }
2346. /*
2347. * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2348. * used for getting the NQ index.
2349. */
  2350. nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
  2351. nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
  2352. cq->qplib_cq.max_wqe = entries;
  2353. cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
  2354. cq->qplib_cq.nq = nq;
  2355. rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
  2356. if (rc) {
  2357. dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
  2358. goto fail;
  2359. }
  2360. cq->ib_cq.cqe = entries;
  2361. cq->cq_period = cq->qplib_cq.period;
  2362. nq->budget++;
  2363. atomic_inc(&rdev->cq_count);
  2364. if (context) {
  2365. struct bnxt_re_cq_resp resp;
  2366. resp.cqid = cq->qplib_cq.id;
  2367. resp.tail = cq->qplib_cq.hwq.cons;
  2368. resp.phase = cq->qplib_cq.period;
  2369. resp.rsvd = 0;
  2370. rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
  2371. if (rc) {
  2372. dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
  2373. bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
  2374. goto c2fail;
  2375. }
  2376. }
  2377. return &cq->ib_cq;
  2378. c2fail:
  2379. if (context)
  2380. ib_umem_release(cq->umem);
  2381. fail:
  2382. kfree(cq->cql);
  2383. kfree(cq);
  2384. return ERR_PTR(rc);
  2385. }
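/* Translate HW requester (SQ) CQE status codes into IB work completion
 * status values.
 */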
  2386. static u8 __req_to_ib_wc_status(u8 qstatus)
  2387. {
  2388. switch (qstatus) {
  2389. case CQ_REQ_STATUS_OK:
  2390. return IB_WC_SUCCESS;
  2391. case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
  2392. return IB_WC_BAD_RESP_ERR;
  2393. case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
  2394. return IB_WC_LOC_LEN_ERR;
  2395. case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
  2396. return IB_WC_LOC_QP_OP_ERR;
  2397. case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
  2398. return IB_WC_LOC_PROT_ERR;
  2399. case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
  2400. return IB_WC_GENERAL_ERR;
  2401. case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
  2402. return IB_WC_REM_INV_REQ_ERR;
  2403. case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
  2404. return IB_WC_REM_ACCESS_ERR;
  2405. case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
  2406. return IB_WC_REM_OP_ERR;
  2407. case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
  2408. return IB_WC_RNR_RETRY_EXC_ERR;
  2409. case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
  2410. return IB_WC_RETRY_EXC_ERR;
  2411. case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
  2412. return IB_WC_WR_FLUSH_ERR;
  2413. default:
  2414. return IB_WC_GENERAL_ERR;
  2415. }
  2416. return 0;
  2417. }
  2418. static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
  2419. {
  2420. switch (qstatus) {
  2421. case CQ_RES_RAWETH_QP1_STATUS_OK:
  2422. return IB_WC_SUCCESS;
  2423. case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
  2424. return IB_WC_LOC_ACCESS_ERR;
  2425. case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
  2426. return IB_WC_LOC_LEN_ERR;
  2427. case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
  2428. return IB_WC_LOC_PROT_ERR;
  2429. case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
  2430. return IB_WC_LOC_QP_OP_ERR;
  2431. case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
  2432. return IB_WC_GENERAL_ERR;
  2433. case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
  2434. return IB_WC_WR_FLUSH_ERR;
  2435. case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
  2436. return IB_WC_WR_FLUSH_ERR;
  2437. default:
  2438. return IB_WC_GENERAL_ERR;
  2439. }
  2440. }
  2441. static u8 __rc_to_ib_wc_status(u8 qstatus)
  2442. {
  2443. switch (qstatus) {
  2444. case CQ_RES_RC_STATUS_OK:
  2445. return IB_WC_SUCCESS;
  2446. case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
  2447. return IB_WC_LOC_ACCESS_ERR;
  2448. case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
  2449. return IB_WC_LOC_LEN_ERR;
  2450. case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
  2451. return IB_WC_LOC_PROT_ERR;
  2452. case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
  2453. return IB_WC_LOC_QP_OP_ERR;
  2454. case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
  2455. return IB_WC_GENERAL_ERR;
  2456. case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
  2457. return IB_WC_REM_INV_REQ_ERR;
  2458. case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
  2459. return IB_WC_WR_FLUSH_ERR;
  2460. case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
  2461. return IB_WC_WR_FLUSH_ERR;
  2462. default:
  2463. return IB_WC_GENERAL_ERR;
  2464. }
  2465. }
  2466. static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
  2467. {
  2468. switch (cqe->type) {
  2469. case BNXT_QPLIB_SWQE_TYPE_SEND:
  2470. wc->opcode = IB_WC_SEND;
  2471. break;
  2472. case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
  2473. wc->opcode = IB_WC_SEND;
  2474. wc->wc_flags |= IB_WC_WITH_IMM;
  2475. break;
  2476. case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
  2477. wc->opcode = IB_WC_SEND;
  2478. wc->wc_flags |= IB_WC_WITH_INVALIDATE;
  2479. break;
  2480. case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
  2481. wc->opcode = IB_WC_RDMA_WRITE;
  2482. break;
  2483. case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
  2484. wc->opcode = IB_WC_RDMA_WRITE;
  2485. wc->wc_flags |= IB_WC_WITH_IMM;
  2486. break;
  2487. case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
  2488. wc->opcode = IB_WC_RDMA_READ;
  2489. break;
  2490. case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
  2491. wc->opcode = IB_WC_COMP_SWAP;
  2492. break;
  2493. case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
  2494. wc->opcode = IB_WC_FETCH_ADD;
  2495. break;
  2496. case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
  2497. wc->opcode = IB_WC_LOCAL_INV;
  2498. break;
  2499. case BNXT_QPLIB_SWQE_TYPE_REG_MR:
  2500. wc->opcode = IB_WC_REG_MR;
  2501. break;
  2502. default:
  2503. wc->opcode = IB_WC_SEND;
  2504. break;
  2505. }
  2506. wc->status = __req_to_ib_wc_status(cqe->status);
  2507. }
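/* Classify a raw QP1 completion as RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6
 * from the raweth flags; returns -1 if the itype is not RoCE.
 */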
  2508. static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
  2509. u16 raweth_qp1_flags2)
  2510. {
  2511. bool is_ipv6 = false, is_ipv4 = false;
2512. /* raweth_qp1_flags bits 9-6 indicate the itype */
  2513. if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
  2514. != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
  2515. return -1;
  2516. if (raweth_qp1_flags2 &
  2517. CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
  2518. raweth_qp1_flags2 &
  2519. CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
  2520. /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
  2521. (raweth_qp1_flags2 &
  2522. CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
  2523. (is_ipv6 = true) : (is_ipv4 = true);
  2524. return ((is_ipv6) ?
  2525. BNXT_RE_ROCEV2_IPV6_PACKET :
  2526. BNXT_RE_ROCEV2_IPV4_PACKET);
  2527. } else {
  2528. return BNXT_RE_ROCE_V1_PACKET;
  2529. }
  2530. }
  2531. static int bnxt_re_to_ib_nw_type(int nw_type)
  2532. {
  2533. u8 nw_hdr_type = 0xFF;
  2534. switch (nw_type) {
  2535. case BNXT_RE_ROCE_V1_PACKET:
  2536. nw_hdr_type = RDMA_NETWORK_ROCE_V1;
  2537. break;
  2538. case BNXT_RE_ROCEV2_IPV4_PACKET:
  2539. nw_hdr_type = RDMA_NETWORK_IPV4;
  2540. break;
  2541. case BNXT_RE_ROCEV2_IPV6_PACKET:
  2542. nw_hdr_type = RDMA_NETWORK_IPV6;
  2543. break;
  2544. }
  2545. return nw_hdr_type;
  2546. }
  2547. static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
  2548. void *rq_hdr_buf)
  2549. {
  2550. u8 *tmp_buf = NULL;
  2551. struct ethhdr *eth_hdr;
  2552. u16 eth_type;
  2553. bool rc = false;
  2554. tmp_buf = (u8 *)rq_hdr_buf;
2555. /*
2556. * If the destination MAC is not the same as the interface MAC,
2557. * this could be a loopback address or a multicast address;
2558. * check whether it is a loopback packet.
2559. */
  2560. if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
  2561. tmp_buf += 4;
  2562. /* Check the ether type */
  2563. eth_hdr = (struct ethhdr *)tmp_buf;
  2564. eth_type = ntohs(eth_hdr->h_proto);
  2565. switch (eth_type) {
  2566. case ETH_P_IBOE:
  2567. rc = true;
  2568. break;
  2569. case ETH_P_IP:
  2570. case ETH_P_IPV6: {
  2571. u32 len;
  2572. struct udphdr *udp_hdr;
  2573. len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
  2574. sizeof(struct ipv6hdr));
  2575. tmp_buf += sizeof(struct ethhdr) + len;
  2576. udp_hdr = (struct udphdr *)tmp_buf;
  2577. if (ntohs(udp_hdr->dest) ==
  2578. ROCE_V2_UDP_DPORT)
  2579. rc = true;
  2580. break;
  2581. }
  2582. default:
  2583. break;
  2584. }
  2585. }
  2586. return rc;
  2587. }
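/* Relay a packet received on the hardware QP1 to the GSI shadow QP:
 * post a receive (landing in the ULP's saved buffer) and then send the
 * GRH + payload through the shadow QP so the MAD layer sees a normal
 * UD receive completion.
 */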
  2588. static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
  2589. struct bnxt_qplib_cqe *cqe)
  2590. {
  2591. struct bnxt_re_dev *rdev = qp1_qp->rdev;
  2592. struct bnxt_re_sqp_entries *sqp_entry = NULL;
  2593. struct bnxt_re_qp *qp = rdev->qp1_sqp;
  2594. struct ib_send_wr *swr;
  2595. struct ib_ud_wr udwr;
  2596. struct ib_recv_wr rwr;
  2597. int pkt_type = 0;
  2598. u32 tbl_idx;
  2599. void *rq_hdr_buf;
  2600. dma_addr_t rq_hdr_buf_map;
  2601. dma_addr_t shrq_hdr_buf_map;
  2602. u32 offset = 0;
  2603. u32 skip_bytes = 0;
  2604. struct ib_sge s_sge[2];
  2605. struct ib_sge r_sge[2];
  2606. int rc;
  2607. memset(&udwr, 0, sizeof(udwr));
  2608. memset(&rwr, 0, sizeof(rwr));
  2609. memset(&s_sge, 0, sizeof(s_sge));
  2610. memset(&r_sge, 0, sizeof(r_sge));
  2611. swr = &udwr.wr;
  2612. tbl_idx = cqe->wr_id;
  2613. rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
  2614. (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
  2615. rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
  2616. tbl_idx);
  2617. /* Shadow QP header buffer */
  2618. shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
  2619. tbl_idx);
  2620. sqp_entry = &rdev->sqp_tbl[tbl_idx];
  2621. /* Store this cqe */
  2622. memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
  2623. sqp_entry->qp1_qp = qp1_qp;
  2624. /* Find packet type from the cqe */
  2625. pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
  2626. cqe->raweth_qp1_flags2);
  2627. if (pkt_type < 0) {
  2628. dev_err(rdev_to_dev(rdev), "Invalid packet\n");
  2629. return -EINVAL;
  2630. }
  2631. /* Adjust the offset for the user buffer and post in the rq */
  2632. if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
  2633. offset = 20;
  2634. /*
  2635. * QP1 loopback packet has 4 bytes of internal header before
  2636. * ether header. Skip these four bytes.
  2637. */
  2638. if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
  2639. skip_bytes = 4;
2640. /* First send SGE. Skip the ether header */
  2641. s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
  2642. + skip_bytes;
  2643. s_sge[0].lkey = 0xFFFFFFFF;
  2644. s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
  2645. BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
  2646. /* Second Send SGE */
  2647. s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
  2648. BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
  2649. if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
  2650. s_sge[1].addr += 8;
  2651. s_sge[1].lkey = 0xFFFFFFFF;
  2652. s_sge[1].length = 256;
  2653. /* First recv SGE */
  2654. r_sge[0].addr = shrq_hdr_buf_map;
  2655. r_sge[0].lkey = 0xFFFFFFFF;
  2656. r_sge[0].length = 40;
  2657. r_sge[1].addr = sqp_entry->sge.addr + offset;
  2658. r_sge[1].lkey = sqp_entry->sge.lkey;
  2659. r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
  2660. /* Create receive work request */
  2661. rwr.num_sge = 2;
  2662. rwr.sg_list = r_sge;
  2663. rwr.wr_id = tbl_idx;
  2664. rwr.next = NULL;
  2665. rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
  2666. if (rc) {
  2667. dev_err(rdev_to_dev(rdev),
  2668. "Failed to post Rx buffers to shadow QP");
  2669. return -ENOMEM;
  2670. }
  2671. swr->num_sge = 2;
  2672. swr->sg_list = s_sge;
  2673. swr->wr_id = tbl_idx;
  2674. swr->opcode = IB_WR_SEND;
  2675. swr->next = NULL;
  2676. udwr.ah = &rdev->sqp_ah->ib_ah;
  2677. udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
  2678. udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
  2679. /* post data received in the send queue */
  2680. rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
  2681. return 0;
  2682. }
  2683. static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
  2684. struct bnxt_qplib_cqe *cqe)
  2685. {
  2686. wc->opcode = IB_WC_RECV;
  2687. wc->status = __rawqp1_to_ib_wc_status(cqe->status);
  2688. wc->wc_flags |= IB_WC_GRH;
  2689. }
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
                                u16 *vid, u8 *sl)
{
        bool ret = false;
        u32 metadata;
        u16 tpid;

        metadata = orig_cqe->raweth_qp1_metadata;
        if (orig_cqe->raweth_qp1_flags2 &
            CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
                tpid = ((metadata &
                         CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
                         CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
                if (tpid == ETH_P_8021Q) {
                        *vid = metadata &
                               CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
                        *sl = (metadata &
                               CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
                               CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
                        ret = true;
                }
        }

        return ret;
}
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
                                      struct bnxt_qplib_cqe *cqe)
{
        wc->opcode = IB_WC_RECV;
        wc->status = __rc_to_ib_wc_status(cqe->status);

        if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
                wc->wc_flags |= IB_WC_WITH_IMM;
        if (cqe->flags & CQ_RES_RC_FLAGS_INV)
                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
        if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
            (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
                wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
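
/* Fill an ib_wc for a receive completion on the shadow GSI QP. The original
 * QP1 CQE stashed in rdev->sqp_tbl supplies the wr_id, length, source QP,
 * SMAC, optional VLAN information and network header type that are reported
 * to the consumer.
 */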
static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
                                             struct ib_wc *wc,
                                             struct bnxt_qplib_cqe *cqe)
{
        struct bnxt_re_dev *rdev = qp->rdev;
        struct bnxt_re_qp *qp1_qp = NULL;
        struct bnxt_qplib_cqe *orig_cqe = NULL;
        struct bnxt_re_sqp_entries *sqp_entry = NULL;
        int nw_type;
        u32 tbl_idx;
        u16 vlan_id;
        u8 sl;

        tbl_idx = cqe->wr_id;

        sqp_entry = &rdev->sqp_tbl[tbl_idx];
        qp1_qp = sqp_entry->qp1_qp;
        orig_cqe = &sqp_entry->cqe;

        wc->wr_id = sqp_entry->wrid;
        wc->byte_len = orig_cqe->length;
        wc->qp = &qp1_qp->ib_qp;

        wc->ex.imm_data = orig_cqe->immdata;
        wc->src_qp = orig_cqe->src_qp;
        memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
        if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
                wc->vlan_id = vlan_id;
                wc->sl = sl;
                wc->wc_flags |= IB_WC_WITH_VLAN;
        }
        wc->port_num = 1;
        wc->vendor_err = orig_cqe->status;

        wc->opcode = IB_WC_RECV;
        wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
        wc->wc_flags |= IB_WC_GRH;

        nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
                                            orig_cqe->raweth_qp1_flags2);
        if (nw_type >= 0) {
                wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
                wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
        }
}
static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
                                      struct bnxt_qplib_cqe *cqe)
{
        wc->opcode = IB_WC_RECV;
        wc->status = __rc_to_ib_wc_status(cqe->status);

        if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
                wc->wc_flags |= IB_WC_WITH_IMM;
        if (cqe->flags & CQ_RES_RC_FLAGS_INV)
                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
        if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
            (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
                wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
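
/* Post a "phantom" WQE (a bind of the fence memory window) on the send queue
 * while holding sq_lock. On success the per-QP phantom_wqe_cnt is incremented
 * and the producer indices are logged for debugging.
 */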
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
        struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&qp->sq_lock, flags);

        rc = bnxt_re_bind_fence_mw(lib_qp);
        if (!rc) {
                lib_qp->sq.phantom_wqe_cnt++;
                dev_dbg(&lib_qp->sq.hwq.pdev->dev,
                        "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
                        lib_qp->id, lib_qp->sq.hwq.prod,
                        HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
                        lib_qp->sq.phantom_wqe_cnt);
        }

        spin_unlock_irqrestore(&qp->sq_lock, flags);
        return rc;
}
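
/* Poll up to num_entries completions from the CQ. Each pass drains CQEs via
 * bnxt_qplib_poll_cq, posts a phantom WQE if the library requested one,
 * appends any flushed CQEs, and then translates every qplib CQE into an
 * ib_wc. Completions belonging to QP1 and its shadow QP are redirected
 * through the stored sqp_tbl entries instead of being reported directly.
 */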
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
        struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
        struct bnxt_re_qp *qp;
        struct bnxt_qplib_cqe *cqe;
        int i, ncqe, budget;
        struct bnxt_qplib_q *sq;
        struct bnxt_qplib_qp *lib_qp;
        u32 tbl_idx;
        struct bnxt_re_sqp_entries *sqp_entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        budget = min_t(u32, num_entries, cq->max_cql);
        num_entries = budget;
        if (!cq->cql) {
                dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
                goto exit;
        }
        cqe = &cq->cql[0];
        while (budget) {
                lib_qp = NULL;
                ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
                if (lib_qp) {
                        sq = &lib_qp->sq;
                        if (sq->send_phantom) {
                                qp = container_of(lib_qp,
                                                  struct bnxt_re_qp, qplib_qp);
                                if (send_phantom_wqe(qp) == -ENOMEM)
                                        dev_err(rdev_to_dev(cq->rdev),
                                                "Phantom failed! Scheduled to send again\n");
                                else
                                        sq->send_phantom = false;
                        }
                }
                if (ncqe < budget)
                        ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
                                                              cqe + ncqe,
                                                              budget - ncqe);

                if (!ncqe)
                        break;

                for (i = 0; i < ncqe; i++, cqe++) {
                        /* Transcribe each qplib_wqe back to ib_wc */
                        memset(wc, 0, sizeof(*wc));

                        wc->wr_id = cqe->wr_id;
                        wc->byte_len = cqe->length;
                        qp = container_of
                                ((struct bnxt_qplib_qp *)
                                 (unsigned long)(cqe->qp_handle),
                                 struct bnxt_re_qp, qplib_qp);
                        if (!qp) {
                                dev_err(rdev_to_dev(cq->rdev),
                                        "POLL CQ : bad QP handle");
                                continue;
                        }
                        wc->qp = &qp->ib_qp;
                        wc->ex.imm_data = cqe->immdata;
                        wc->src_qp = cqe->src_qp;
                        memcpy(wc->smac, cqe->smac, ETH_ALEN);
                        wc->port_num = 1;
                        wc->vendor_err = cqe->status;

                        switch (cqe->opcode) {
                        case CQ_BASE_CQE_TYPE_REQ:
                                if (qp->qplib_qp.id ==
                                    qp->rdev->qp1_sqp->qplib_qp.id) {
                                        /* Handle this completion with
                                         * the stored completion
                                         */
                                        memset(wc, 0, sizeof(*wc));
                                        continue;
                                }
                                bnxt_re_process_req_wc(wc, cqe);
                                break;
                        case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
                                if (!cqe->status) {
                                        int rc = 0;

                                        rc = bnxt_re_process_raw_qp_pkt_rx
                                                        (qp, cqe);
                                        if (!rc) {
                                                memset(wc, 0, sizeof(*wc));
                                                continue;
                                        }
                                        cqe->status = -1;
                                }
                                /* Errors need not be looped back.
                                 * But change the wr_id to the one
                                 * stored in the table
                                 */
                                tbl_idx = cqe->wr_id;
                                sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
                                wc->wr_id = sqp_entry->wrid;
                                bnxt_re_process_res_rawqp1_wc(wc, cqe);
                                break;
                        case CQ_BASE_CQE_TYPE_RES_RC:
                                bnxt_re_process_res_rc_wc(wc, cqe);
                                break;
                        case CQ_BASE_CQE_TYPE_RES_UD:
                                if (qp->qplib_qp.id ==
                                    qp->rdev->qp1_sqp->qplib_qp.id) {
                                        /* Handle this completion with
                                         * the stored completion
                                         */
                                        if (cqe->status) {
                                                continue;
                                        } else {
                                                bnxt_re_process_res_shadow_qp_wc
                                                                (qp, wc, cqe);
                                                break;
                                        }
                                }
                                bnxt_re_process_res_ud_wc(wc, cqe);
                                break;
                        default:
                                dev_err(rdev_to_dev(cq->rdev),
                                        "POLL CQ : type 0x%x not handled",
                                        cqe->opcode);
                                continue;
                        }
                        wc++;
                        budget--;
                }
        }
exit:
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        return num_entries - budget;
}
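
/* Arm the CQ for the next completion (ARMALL) or the next solicited
 * completion (ARMSE). When IB_CQ_REPORT_MISSED_EVENTS is set and the CQ is
 * not empty, return 1 so the caller polls again instead of waiting for an
 * interrupt.
 */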
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
                          enum ib_cq_notify_flags ib_cqn_flags)
{
        struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
        int type = 0, rc = 0;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        /* Trigger on the very next completion */
        if (ib_cqn_flags & IB_CQ_NEXT_COMP)
                type = DBR_DBR_TYPE_CQ_ARMALL;
        /* Trigger on the next solicited completion */
        else if (ib_cqn_flags & IB_CQ_SOLICITED)
                type = DBR_DBR_TYPE_CQ_ARMSE;

        /* Poll to see if there are missed events */
        if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
            !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
                rc = 1;
                goto exit;
        }
        bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        return rc;
}
/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_mr *mr;
        u64 pbl = 0;
        int rc;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->rdev = rdev;
        mr->qplib_mr.pd = &pd->qplib_pd;
        mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

        /* Allocate and register 0 as the address */
        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc)
                goto fail;

        mr->qplib_mr.hwq.level = PBL_LVL_MAX;
        mr->qplib_mr.total_size = -1; /* Infinite length */
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
                               PAGE_SIZE);
        if (rc)
                goto fail_mr;

        mr->ib_mr.lkey = mr->qplib_mr.lkey;
        if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
                               IB_ACCESS_REMOTE_ATOMIC))
                mr->ib_mr.rkey = mr->ib_mr.lkey;
        atomic_inc(&rdev->mr_count);

        return &mr->ib_mr;

fail_mr:
        bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
        kfree(mr);
        return ERR_PTR(rc);
}
int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
{
        struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
        struct bnxt_re_dev *rdev = mr->rdev;
        int rc;

        rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc)
                dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);

        if (mr->pages) {
                rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
                                                        &mr->qplib_frpl);
                kfree(mr->pages);
                mr->npages = 0;
                mr->pages = NULL;
        }
        if (!IS_ERR_OR_NULL(mr->ib_umem))
                ib_umem_release(mr->ib_umem);

        kfree(mr);
        atomic_dec(&rdev->mr_count);
        return rc;
}
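
/* Per-page callback used by ib_sg_to_pages() from bnxt_re_map_mr_sg() below;
 * it appends one DMA address to the fast-register page list.
 */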
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
        struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

        if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
                return -ENOMEM;

        mr->pages[mr->npages++] = addr;
        return 0;
}
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

        mr->npages = 0;
        return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
                               u32 max_num_sg)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_mr *mr = NULL;
        int rc;

        if (type != IB_MR_TYPE_MEM_REG) {
                dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
                return ERR_PTR(-EINVAL);
        }
        if (max_num_sg > MAX_PBL_LVL_1_PGS)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->rdev = rdev;
        mr->qplib_mr.pd = &pd->qplib_pd;
        mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc)
                goto bail;

        mr->ib_mr.lkey = mr->qplib_mr.lkey;
        mr->ib_mr.rkey = mr->ib_mr.lkey;

        mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
        if (!mr->pages) {
                rc = -ENOMEM;
                goto fail;
        }
        rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
                                                 &mr->qplib_frpl, max_num_sg);
        if (rc) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to allocate HW FR page list");
                goto fail_mr;
        }

        atomic_inc(&rdev->mr_count);
        return &mr->ib_mr;

fail_mr:
        kfree(mr->pages);
fail:
        bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
        kfree(mr);
        return ERR_PTR(rc);
}
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_mw *mw;
        int rc;

        mw = kzalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        mw->rdev = rdev;
        mw->qplib_mw.pd = &pd->qplib_pd;

        mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
                             CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
                             CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
                goto fail;
        }
        mw->ib_mw.rkey = mw->qplib_mw.rkey;

        atomic_inc(&rdev->mw_count);
        return &mw->ib_mw;

fail:
        kfree(mw);
        return ERR_PTR(rc);
}
int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
        struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
        struct bnxt_re_dev *rdev = mw->rdev;
        int rc;

        rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
                return rc;
        }

        kfree(mw);
        atomic_dec(&rdev->mw_count);
        return rc;
}
static int bnxt_re_page_size_ok(int page_shift)
{
        switch (page_shift) {
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
        case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
                return 1;
        default:
                return 0;
        }
}
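
/* Walk the umem scatterlist and copy the DMA addresses of the constituent
 * pages into the PBL. The first entry is masked down to the chosen page
 * boundary; subsequent addresses are recorded only when they are aligned to
 * that boundary. Returns the number of PBL entries filled.
 */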
static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
                             int page_shift)
{
        u64 *pbl_tbl = pbl_tbl_orig;
        u64 paddr;
        u64 page_mask = (1ULL << page_shift) - 1;
        int i, pages;
        struct scatterlist *sg;
        int entry;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> PAGE_SHIFT;
                for (i = 0; i < pages; i++) {
                        paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
                        if (pbl_tbl == pbl_tbl_orig)
                                *pbl_tbl++ = paddr & ~page_mask;
                        else if ((paddr & page_mask) == 0)
                                *pbl_tbl++ = paddr;
                }
        }
        return pbl_tbl - pbl_tbl_orig;
}
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
                                  u64 virt_addr, int mr_access_flags,
                                  struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_mr *mr;
        struct ib_umem *umem;
        u64 *pbl_tbl = NULL;
        int umem_pgs, page_shift, rc;

        if (length > BNXT_RE_MAX_MR_SIZE) {
                dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
                        length, BNXT_RE_MAX_MR_SIZE);
                return ERR_PTR(-ENOMEM);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->rdev = rdev;
        mr->qplib_mr.pd = &pd->qplib_pd;
        mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
                goto free_mr;
        }
        /* The fixed portion of the rkey is the same as the lkey */
        mr->ib_mr.rkey = mr->qplib_mr.rkey;

        umem = ib_umem_get(ib_pd->uobject->context, start, length,
                           mr_access_flags, 0);
        if (IS_ERR(umem)) {
                dev_err(rdev_to_dev(rdev), "Failed to get umem");
                rc = -EFAULT;
                goto free_mrw;
        }
        mr->ib_umem = umem;

        mr->qplib_mr.va = virt_addr;
        umem_pgs = ib_umem_page_count(umem);
        if (!umem_pgs) {
                dev_err(rdev_to_dev(rdev), "umem is invalid!");
                rc = -EINVAL;
                goto free_umem;
        }
        mr->qplib_mr.total_size = length;

        pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
        if (!pbl_tbl) {
                rc = -ENOMEM;
                goto free_umem;
        }

        page_shift = umem->page_shift;

        if (!bnxt_re_page_size_ok(page_shift)) {
                dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
                rc = -EFAULT;
                goto fail;
        }

        if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
                dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
                        length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
                rc = -EINVAL;
                goto fail;
        }
        if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
                page_shift = BNXT_RE_PAGE_SHIFT_2M;
                dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
                         1 << page_shift);
        }

        /* Map umem buf ptrs to the PBL */
        umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
                               umem_pgs, false, 1 << page_shift);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to register user MR");
                goto fail;
        }

        kfree(pbl_tbl);

        mr->ib_mr.lkey = mr->qplib_mr.lkey;
        mr->ib_mr.rkey = mr->qplib_mr.lkey;
        atomic_inc(&rdev->mr_count);

        return &mr->ib_mr;
fail:
        kfree(pbl_tbl);
free_umem:
        ib_umem_release(umem);
free_mrw:
        bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
        kfree(mr);
        return ERR_PTR(rc);
}
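
/* Allocate a user context: verify the uverbs ABI version, allocate the
 * context and its shared page, and return device parameters (dev_id, QP
 * count, page size, CQE size, max CQ depth) to user space through udata.
 */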
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
                                           struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_re_uctx_resp resp;
        struct bnxt_re_ucontext *uctx;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        int rc;

        dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
                ibdev->uverbs_abi_ver);

        if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
                dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
                        BNXT_RE_ABI_VERSION);
                return ERR_PTR(-EPERM);
        }

        uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
        if (!uctx)
                return ERR_PTR(-ENOMEM);

        uctx->rdev = rdev;

        uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
        if (!uctx->shpg) {
                rc = -ENOMEM;
                goto fail;
        }
        spin_lock_init(&uctx->sh_lock);

        resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
        resp.max_qp = rdev->qplib_ctx.qpc_count;
        resp.pg_size = PAGE_SIZE;
        resp.cqe_sz = sizeof(struct cq_base);
        resp.max_cqd = dev_attr->max_cq_wqes;
        resp.rsvd = 0;

        rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to copy user context");
                rc = -EFAULT;
                goto cfail;
        }

        return &uctx->ib_uctx;
cfail:
        free_page((unsigned long)uctx->shpg);
        uctx->shpg = NULL;
fail:
        kfree(uctx);
        return ERR_PTR(rc);
}
int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
        struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);
        struct bnxt_re_dev *rdev = uctx->rdev;
        int rc = 0;

        if (uctx->shpg)
                free_page((unsigned long)uctx->shpg);

        if (uctx->dpi.dbr) {
                /* Free DPI only if this is the first PD allocated by the
                 * application and mark the context dpi as NULL
                 */
                rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
                                            &rdev->qplib_res.dpi_tbl,
                                            &uctx->dpi);
                if (rc)
                        dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
                        /* Don't fail, continue */
                uctx->dpi.dbr = NULL;
        }

        kfree(uctx);
        return 0;
}
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
        struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);
        struct bnxt_re_dev *rdev = uctx->rdev;
        u64 pfn;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        if (vma->vm_pgoff) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                       PAGE_SIZE, vma->vm_page_prot)) {
                        dev_err(rdev_to_dev(rdev), "Failed to map DPI");
                        return -EAGAIN;
                }
        } else {
                pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start,
                                    pfn, PAGE_SIZE, vma->vm_page_prot)) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to map shared page");
                        return -EAGAIN;
                }
        }

        return 0;
}