ib_verbs.c

/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
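
/* Helpers that translate access flags between the IB core encoding
 * (IB_ACCESS_*) and the qplib/firmware encoding (BNXT_QPLIB_ACCESS_*)
 * used on the HW command path.
 */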
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct net_device *netdev = NULL;

	rcu_read_lock();
	if (rdev)
		netdev = rdev->netdev;
	if (netdev)
		dev_hold(netdev);

	rcu_read_unlock();
	return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));

	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_HCA;
		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modifying the GUID requires modification of the GID table */
		/* GUID should be treated as read-only */
		break;
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* Node Desc should be treated as read-only */
		break;
	default:
		break;
	}
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = 5;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = 3;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP |
				    IB_PORT_IP_BASED_GIDS;

	/* Max MSG size set to 2G for now */
	port_attr->max_msg_sz = 0x80000000;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */
	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx];
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue a MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->qp1_sqp) {
			dev_dbg(rdev_to_dev(rdev),
				"Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
			if (rc) {
				dev_err(rdev_to_dev(rdev),
					"Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	if ((attr->ndev) && is_vlan_dev(attr->ndev))
		vlan_id = vlan_dev_vlan_id(attr->ndev);

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;
	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
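
/* Fence support: each kernel PD carries a small DMA-mapped buffer with a
 * dedicated fence MR registered over it and a type-1 memory window bound
 * to that MR. Posting the pre-built MW-bind WQE (flagged with UC_FENCE)
 * on a QP's send queue enforces fence semantics in hardware.
 */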
#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in the fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	dev_dbg(rdev_to_dev(qp->rdev),
		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		dev_err(rdev_to_dev(rdev),
			"Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	int rc;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
	}

	kfree(pd);
	return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
						      struct bnxt_re_ucontext,
						      ib_uctx);
	struct bnxt_re_pd *pd;
	int rc;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate the DPI here, in alloc_pd, so that
			 * ibv_devinfo and friends do not fail when DPIs
			 * are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			dev_warn(rdev_to_dev(rdev),
				 "Failed to create Fence-MR\n");
	return &pd->ib_pd;
dbfail:
	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				    &pd->qplib_pd);
fail:
	kfree(pd);
	return ERR_PTR(rc);
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	int rc;

	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
		return rc;
	}
	kfree(ah);
	return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
				struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	int rc;
	u8 nw_type;
	struct ib_gid_attr sgid_attr;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
		return ERR_PTR(-EINVAL);
	}
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	/* If RoCE V2 is enabled, the stack will have two entries for
	 * each GID entry. Avoid the duplicate entry in HW by dividing
	 * the GID index by 2 for RoCE V2.
	 */
	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
	if (ib_pd->uobject &&
	    !rdma_is_multicast_addr((struct in6_addr *)
				    grh->dgid.raw) &&
	    !rdma_link_local_addr((struct in6_addr *)
				  grh->dgid.raw)) {
		union ib_gid sgid;

		rc = ib_get_cached_gid(&rdev->ibdev, 1,
				       grh->sgid_index, &sgid,
				       &sgid_attr);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to query gid at index %d",
				grh->sgid_index);
			goto fail;
		}
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
		/* Get network header type for this GID */
		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
			break;
		default:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
			break;
		}
	}

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
		goto fail;
	}

	/* Write AVID to shared page. */
	if (ib_pd->uobject) {
		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
		struct bnxt_re_ucontext *uctx;
		unsigned long flag;
		u32 *wrptr;

		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return &ah->ib_ah;

fail:
	kfree(ah);
	return ERR_PTR(rc);
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy HW AH for shadow QP");
			return rc;
		}

		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy Shadow QP");
			return rc;
		}
		mutex_lock(&rdev->qp_lock);
		list_del(&rdev->qp1_sqp->list);
		atomic_dec(&rdev->qp_count);
		mutex_unlock(&rdev->qp_lock);

		kfree(rdev->sqp_ah);
		kfree(rdev->qp1_sqp);
		rdev->qp1_sqp = NULL;
		rdev->sqp_ah = NULL;
	}

	if (!IS_ERR_OR_NULL(qp->rumem))
		ib_umem_release(qp->rumem);
	if (!IS_ERR_OR_NULL(qp->sumem))
		ib_umem_release(qp->sumem);

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	atomic_dec(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	kfree(qp);
	return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}
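
/* Map the user-supplied SQ/RQ buffers (plus PSN search memory for RC QPs)
 * into the kernel via ib_umem so the HW can DMA the queues directly from
 * user memory; the QP then uses the user context's doorbell page (DPI).
 */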
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_re_qp_req ureq;
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct ib_umem *umem;
	int bytes = 0;
	struct ib_ucontext *context = pd->ib_pd.uobject->context;
	struct bnxt_re_ucontext *cntx = container_of(context,
						     struct bnxt_re_ucontext,
						     ib_uctx);

	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(context, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sglist = umem->sg_head.sgl;
	qplib_qp->sq.nmap = umem->nmap;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(context, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sglist = umem->sg_head.sgl;
		qplib_qp->rq.nmap = umem->nmap;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	qplib_qp->sq.sglist = NULL;
	qplib_qp->sq.nmap = 0;

	return PTR_ERR(umem);
}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}
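
/* The shadow QP is an internal UD QP that mirrors QP1's queue sizes and
 * shares its CQs; the driver uses it to handle QP1 (GSI) traffic.
 */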
static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	rdev->sqp_id = qp->qplib_qp.id;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	int rc, entries;

	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->rdev = rdev;
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
	if (qp->qplib_qp.type == IB_QPT_MAX) {
		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
			qp->qplib_qp.type);
		rc = -EINVAL;
		goto fail;
	}
	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
				  IB_SIGNAL_ALL_WR) ? true : false);

	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

	if (qp_init_attr->send_cq) {
		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
	}

	if (qp_init_attr->recv_cq) {
		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
	}

	if (qp_init_attr->srq) {
		dev_err(rdev_to_dev(rdev), "SRQ not supported");
		rc = -ENOTSUPP;
		goto fail;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);

		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
					       qp_init_attr->cap.max_recv_wr;

		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
	}

	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

	if (qp_init_attr->qp_type == IB_QPT_GSI) {
		/* Allocate 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
					       qp_init_attr->cap.max_send_wr;
		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		qp->qplib_qp.sq.max_sge++;
		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

		qp->qplib_qp.rq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

		qp->qplib_qp.sq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
		qp->qplib_qp.dpi = &rdev->dpi_privileged;
		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
			goto fail;
		}
		/* Create a shadow QP to handle the QP1 traffic */
		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
							 &qp->qplib_qp);
		if (!rdev->qp1_sqp) {
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create Shadow QP for QP1");
			goto qp_destroy;
		}
		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
							   &qp->qplib_qp);
		if (!rdev->sqp_ah) {
			bnxt_qplib_destroy_qp(&rdev->qplib_res,
					      &rdev->qp1_sqp->qplib_qp);
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create AH entry for ShadowQP");
			goto qp_destroy;
		}
	} else {
		/* Allocate BNXT_QPLIB_RESERVED_QP_WRS + 1 more than
		 * what's provided
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes +
						BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

		/* Reserve one slot for the phantom WQE. The application can
		 * then post one extra entry, but allowing that avoids an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;

		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
		if (udata) {
			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
			if (rc)
				goto fail;
		} else {
			qp->qplib_qp.dpi = &rdev->dpi_privileged;
		}

		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
			goto fail;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	if (udata) {
		struct bnxt_re_qp_resp resp;

		resp.qpid = qp->ib_qp.qp_num;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
			goto qp_destroy;
		}
	}
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
fail:
	kfree(qp);
	return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}
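
/* Keep the shadow QP in sync with QP1: when QP1 is modified, mirror the
 * relevant state, pkey index, qkey and SQ PSN changes onto the shadow QP.
 */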
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to modify Shadow QP for QP1");
	return rc;
}
int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	enum ib_qp_state curr_qp_state, new_qp_state;
	int rc, entries;
	int status;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;
	u8 nw_type;

	qp->qplib_qp.modify_flags = 0;
	if (qp_attr_mask & IB_QP_STATE) {
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask,
					IB_LINK_LAYER_ETHERNET)) {
			dev_err(rdev_to_dev(rdev),
				"Invalid attribute mask: %#x specified ",
				qp_attr_mask);
			dev_err(rdev_to_dev(rdev),
				"for qpn: %#x type: %#x",
				ib_qp->qp_num, ib_qp->qp_type);
			dev_err(rdev_to_dev(rdev),
				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
				curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p to flush list\n", qp);
			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		}
		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n", qp);
			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&qp_attr->ah_attr);

		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = grh->flow_label;
		/* If RoCE V2 is enabled, the stack will have two entries for
		 * each GID entry. Avoid this duplicate entry in HW by
		 * dividing the GID index by 2 for RoCE V2.
		 */
		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
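		/*
		 * Worked example (assuming the usual v1/v2 pairing in the
		 * GID table): host sgid_index values 6 and 7 refer to the
		 * same address and both map to HW sgid_index 3.
		 */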
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		status = ib_get_cached_gid(&rdev->ibdev, 1,
					   grh->sgid_index,
					   &sgid, &sgid_attr);
		if (!status && sgid_attr.ndev) {
			memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
			       ETH_ALEN);
			dev_put(sgid_attr.ndev);
			nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
							 &sgid);
			switch (nw_type) {
			case RDMA_NETWORK_IPV4:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
				break;
			case RDMA_NETWORK_IPV6:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
				break;
			default:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
				break;
			}
		}
	}
	if (qp_attr_mask & IB_QP_PATH_MTU) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	}
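	/*
	 * When moving to RTR without an explicit IB_QP_PATH_MTU, the MTU is
	 * derived from the netdev: e.g. a standard 1500-byte Ethernet MTU
	 * typically leaves room for IB_MTU_1024 after the RoCE transport
	 * headers are accounted for.
	 */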
	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			dev_err(rdev_to_dev(rdev),
				"max_dest_rd_atomic requested %d is > dev_max %d",
				qp_attr->max_dest_rd_atomic,
				dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			dev_err(rdev_to_dev(rdev),
				"Modify QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserve one slot for the phantom WQE. Some applications may
		 * post one extra entry in this case; allow it to avoid an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
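		/*
		 * Worked example: max_send_wr = 100 rounds up to 128 WQEs
		 * (assuming the device allows that many), so q_full_delta
		 * becomes 128 - 100 - 1 = 27.
		 */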
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}

int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}

/* Routine for sending QP1 packets for RoCE V1 and V2 */
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
				     struct ib_send_wr *wr,
				     struct bnxt_qplib_swqe *wqe,
				     int payload_size)
{
	struct ib_device *ibdev = &qp->rdev->ibdev;
	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
					     ib_ah);
	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
	struct bnxt_qplib_sge sge;
	union ib_gid sgid;
	u8 nw_type;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	union ib_gid dgid;
	bool is_eth = false;
	bool is_vlan = false;
	bool is_grh = false;
	bool is_udp = false;
	u8 ip_version = 0;
	u16 vlan_id = 0xFFFF;
	void *buf;
	int i, rc = 0;

	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));

	rc = ib_get_cached_gid(ibdev, 1,
			       qplib_ah->host_sgid_index, &sgid,
			       &sgid_attr);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev),
			"Failed to query gid at index %d",
			qplib_ah->host_sgid_index);
		return rc;
	}
	if (sgid_attr.ndev) {
		if (is_vlan_dev(sgid_attr.ndev))
			vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
		dev_put(sgid_attr.ndev);
	}
	/* Get network header type for this GID */
	nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
	switch (nw_type) {
	case RDMA_NETWORK_IPV4:
		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
		break;
	default:
		nw_type = BNXT_RE_ROCE_V1_PACKET;
		break;
	}
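	/*
	 * From here on nw_type holds the driver's BNXT_RE_* packet type
	 * rather than the RDMA_NETWORK_* value; it only selects how the QP1
	 * header buffer is sized and trimmed below.
	 */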
	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
	is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
	if (is_udp) {
		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
			ip_version = 4;
			ether_type = ETH_P_IP;
		} else {
			ip_version = 6;
			ether_type = ETH_P_IPV6;
		}
		is_grh = false;
	} else {
		ether_type = ETH_P_IBOE;
		is_grh = true;
	}

	is_eth = true;
	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;

	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
			  ip_version, is_udp, 0, &qp->qp1_hdr);

	/* ETH */
	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);

	/* For vlan, check the sgid for vlan existence */
	if (!is_vlan) {
		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
	} else {
		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
	}

	if (is_grh || (ip_version == 6)) {
		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
		       sizeof(sgid));
		qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
	}

	if (ip_version == 4) {
		qp->qp1_hdr.ip4.tos = 0;
		qp->qp1_hdr.ip4.id = 0;
		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
		memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
	}

	if (is_udp) {
		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
		qp->qp1_hdr.udp.sport = htons(0x8CD1);
		qp->qp1_hdr.udp.csum = 0;
	}
	/* BTH */
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		qp->qp1_hdr.immediate_present = 1;
	} else {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	}
	if (wr->send_flags & IB_SEND_SOLICITED)
		qp->qp1_hdr.bth.solicited_event = 1;
	/* pad_count */
	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;

	/* P_key for QP1 is for all members */
	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
	qp->qp1_hdr.bth.ack_req = 0;
	qp->send_psn++;
	qp->send_psn &= BTH_PSN_MASK;
	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
	/* DETH */
	/* Use the privileged Q_Key for QP1 */
	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
	qp->qp1_hdr.deth.source_qpn = IB_QP1;

	/* Pack the QP1 header into the transmit buffer */
	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
	if (buf) {
		ib_ud_header_pack(&qp->qp1_hdr, buf);
		for (i = wqe->num_sge; i; i--) {
			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
		}
		/*
		 * Max header buf size for IPV6 RoCE V2 is 86 bytes, the same
		 * as the QP1 SQ header buffer:
		 * ETH(14) + VLAN(4) + IPv6(40) + UDP(8) + BTH(20) = 86.
		 * For IPV4 RoCE V2 it is only 66:
		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20) = 66.
		 * So subtract the 20-byte difference from the QP1 SQ header
		 * buf size.
		 */
		if (is_udp && ip_version == 4)
			sge.size -= 20;
		/*
		 * Max header buf size for RoCE V1 is 78:
		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20) = 78.
		 * Subtract the 8-byte difference from the QP1 SQ header
		 * buf size.
		 */
		if (!is_udp)
			sge.size -= 8;

		/* Subtract 4 bytes for non vlan packets */
		if (!is_vlan)
			sge.size -= 4;

		wqe->sg_list[0].addr = sge.addr;
		wqe->sg_list[0].lkey = sge.lkey;
		wqe->sg_list[0].size = sge.size;
		wqe->num_sge++;
	} else {
		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
		rc = -ENOMEM;
	}
	return rc;
}

/* For the MAD layer, it only provides the recv SGE the size of
 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
 * receive packet (334 bytes) with no VLAN and then copy the GRH
 * and the MAD datagram out to the provided SGE.
 */
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
					    struct ib_recv_wr *wr,
					    struct bnxt_qplib_swqe *wqe,
					    int payload_size)
{
	struct bnxt_qplib_sge ref, sge;
	u32 rq_prod_index;
	struct bnxt_re_sqp_entries *sqp_entry;

	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);

	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
		return -ENOMEM;

	/* Create 1 SGE to receive the entire ethernet packet */

	/* Save the reference from ULP */
	ref.addr = wqe->sg_list[0].addr;
	ref.lkey = wqe->sg_list[0].lkey;
	ref.size = wqe->sg_list[0].size;

	sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];

	/* SGE 1 */
	wqe->sg_list[0].addr = sge.addr;
	wqe->sg_list[0].lkey = sge.lkey;
	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	sge.size -= wqe->sg_list[0].size;

	sqp_entry->sge.addr = ref.addr;
	sqp_entry->sge.lkey = ref.lkey;
	sqp_entry->sge.size = ref.size;
	/* Store the wrid for reporting completion */
	sqp_entry->wrid = wqe->wr_id;
	/* change the wqe->wrid to table index */
	wqe->wr_id = rq_prod_index;
	return 0;
}

static int is_ud_qp(struct bnxt_re_qp *qp)
{
	return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
}

static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
				  struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_ah *ah = NULL;

	if (is_ud_qp(qp)) {
		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
		wqe->send.q_key = ud_wr(wr)->remote_qkey;
		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
		wqe->send.avid = ah->qplib_ah.id;
	}
	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
		wqe->send.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
		wqe->send.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
		wqe->rdma.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_RDMA_READ:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
	wqe->rdma.r_key = rdma_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		wqe->atomic.swap_data = atomic_wr(wr)->swap;
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		break;
	default:
		return -EINVAL;
	}
	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
	wqe->atomic.r_key = atomic_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	return 0;
}

static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

	return 0;
}

static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int access = wr->access;

	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	if (wr->wr.send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;

	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
	wqe->frmr.va = wr->mr->iova;
	return 0;
}

static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	/* Copy the inline data to the data field */
	u8 *in_data;
	u32 i, sge_len;
	void *sge_addr;

	in_data = wqe->inline_data;
	for (i = 0; i < wr->num_sge; i++) {
		sge_addr = (void *)(unsigned long)wr->sg_list[i].addr;
		sge_len = wr->sg_list[i].length;

		if ((sge_len + wqe->inline_len) >
		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_err(rdev_to_dev(rdev),
				"Inline data size requested > supported value");
			return -EINVAL;
		}
		memcpy(in_data, sge_addr, sge_len);
		in_data += sge_len;
		wqe->inline_len += sge_len;
	}
	return wqe->inline_len;
}

static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
				   struct ib_send_wr *wr,
				   struct bnxt_qplib_swqe *wqe)
{
	int payload_sz = 0;

	if (wr->send_flags & IB_SEND_INLINE)
		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
	else
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
					       wqe->num_sge);
	return payload_sz;
}

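/*
 * The UD/GSI/raw-Ethertype SQ is nudged with an extra Modify-QP-to-RTS once
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, then the counter resets.
 * This appears to be a HW stall workaround (as the function name suggests)
 * rather than a real state change, since the QP is already in RTS.
 */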
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
	     qp->ib_qp.qp_type == IB_QPT_GSI ||
	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
		int qp_attr_mask;
		struct ib_qp_attr qp_attr;

		qp_attr_mask = IB_QP_STATE;
		qp_attr.qp_state = IB_QPS_RTS;
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
		qp->qplib_qp.wqe_cnt = 0;
	}
}

static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_send_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;

		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Post send failed opcode = %#x rc = %d",
				wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (ib_qp->qp_type == IB_QPT_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			/* send_flags is a bitmask, so test the IP_CSUM bit
			 * directly; more than one flag may be set at once.
			 */
			if (wr->send_flags & IB_SEND_IP_CSUM)
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
			/* fall through - to build the wqe */
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			dev_err(rdev_to_dev(qp->rdev),
				"RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			dev_err(rdev_to_dev(qp->rdev),
				"WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(qp->rdev),
				"post_send failed op:%#x qps = %#x rc = %d\n",
				wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0;

	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;
		wr = wr->next;
	}
	if (!rc)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}

int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}
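		/*
		 * Doorbell batching: the DB is rung once per
		 * BNXT_RE_RQ_WQE_THRESHOLD posted RQEs rather than per WQE;
		 * any remainder is flushed after the loop below.
		 */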
		wr = wr->next;
	}
	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	spin_unlock_irqrestore(&qp->rq_lock, flags);
	return rc;
}

/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_dev *rdev = cq->rdev;
	int rc;
	struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;

	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
		return rc;
	}
	if (!IS_ERR_OR_NULL(cq->umem))
		ib_umem_release(cq->umem);

	kfree(cq->cql);
	kfree(cq);
	atomic_dec(&rdev->cq_count);
	nq->budget--;
	return 0;
}

struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = NULL;
	int rc, entries;
	int cqe = attr->cqe;
	struct bnxt_qplib_nq *nq = NULL;
	unsigned int nq_alloc_cnt;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
		return ERR_PTR(-EINVAL);
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;
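	/*
	 * e.g. a request for 1000 CQEs rounds up to 1024 entries here
	 * (assuming dev_attr->max_cq_wqes is large enough).
	 */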
	if (context) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = container_of
						(context,
						 struct bnxt_re_ucontext,
						 ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(context, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
		cq->qplib_cq.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
		cq->qplib_cq.sghead = NULL;
		cq->qplib_cq.nmap = 0;
	}
	/*
	 * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is used to
	 * derive the NQ index, e.g. with num_msix = 5 successive CQs rotate
	 * across nq[0..3].
	 */
	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
	cq->qplib_cq.nq = nq;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;
	nq->budget++;

	atomic_inc(&rdev->cq_count);

	if (context) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return &cq->ib_cq;

c2fail:
	if (context)
		ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	kfree(cq);
	return ERR_PTR(rc);
}

static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}

static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
				     u16 raweth_qp1_flags2)
{
	bool is_ipv6 = false, is_ipv4 = false;

	/* raweth_qp1_flags Bit 9-6 indicates itype */
	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
		return -1;

	if (raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
	    raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
		if (raweth_qp1_flags2 &
		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
			is_ipv6 = true;
		else
			is_ipv4 = true;
		return is_ipv6 ? BNXT_RE_ROCEV2_IPV6_PACKET :
				 BNXT_RE_ROCEV2_IPV4_PACKET;
	} else {
		return BNXT_RE_ROCE_V1_PACKET;
	}
}

static int bnxt_re_to_ib_nw_type(int nw_type)
{
	u8 nw_hdr_type = 0xFF;

	switch (nw_type) {
	case BNXT_RE_ROCE_V1_PACKET:
		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case BNXT_RE_ROCEV2_IPV4_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV4;
		break;
	case BNXT_RE_ROCEV2_IPV6_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV6;
		break;
	}
	return nw_hdr_type;
}

static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
				       void *rq_hdr_buf)
{
	u8 *tmp_buf = NULL;
	struct ethhdr *eth_hdr;
	u16 eth_type;
	bool rc = false;

	tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If dest mac is not same as I/F mac, this could be a
	 * loopback address or multicast address, check whether
	 * it is a loopback packet
	 */
	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
		tmp_buf += 4;
		/* Check the ether type */
		eth_hdr = (struct ethhdr *)tmp_buf;
		eth_type = ntohs(eth_hdr->h_proto);
		switch (eth_type) {
		case ETH_P_IBOE:
			rc = true;
			break;
		case ETH_P_IP:
		case ETH_P_IPV6: {
			u32 len;
			struct udphdr *udp_hdr;

			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
						      sizeof(struct ipv6hdr));
			tmp_buf += sizeof(struct ethhdr) + len;
			udp_hdr = (struct udphdr *)tmp_buf;
			if (ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT)
				rc = true;
			break;
		}
		default:
			break;
		}
	}

	return rc;
}

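/*
 * Raw QP1 receive path: stash the CQE, then loop the payload back through
 * the shadow QP by posting a recv WR (GRH area + data) and a UD send WR
 * built from the HW header buffer, so the MAD layer later sees a normal
 * GSI completion.
 */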
static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
					 struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp1_qp->rdev;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	struct ib_send_wr *swr;
	struct ib_ud_wr udwr;
	struct ib_recv_wr rwr;
	int pkt_type = 0;
	u32 tbl_idx;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	dma_addr_t shrq_hdr_buf_map;
	u32 offset = 0;
	u32 skip_bytes = 0;
	struct ib_sge s_sge[2];
	struct ib_sge r_sge[2];
	int rc;

	memset(&udwr, 0, sizeof(udwr));
	memset(&rwr, 0, sizeof(rwr));
	memset(&s_sge, 0, sizeof(s_sge));
	memset(&r_sge, 0, sizeof(r_sge));

	swr = &udwr.wr;
	tbl_idx = cqe->wr_id;

	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
							  tbl_idx);

	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
							    tbl_idx);
	sqp_entry = &rdev->sqp_tbl[tbl_idx];

	/* Store this cqe */
	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
	sqp_entry->qp1_qp = qp1_qp;

	/* Find packet type from the cqe */
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
		return -EINVAL;
	}

	/* Adjust the offset for the user buffer and post in the rq */
	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
		offset = 20;

	/*
	 * QP1 loopback packet has 4 bytes of internal header before
	 * ether header. Skip these four bytes.
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* First send SGE. Skip the ether header */
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				   BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second Send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */
	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to post Rx buffers to shadow QP");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;

	udwr.ah = &rdev->sqp_ah->ib_ah;
	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;

	/* post data received in the send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
	return 0;
}

static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}

static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
				u16 *vid, u8 *sl)
{
	bool ret = false;
	u32 metadata;
	u16 tpid;

	metadata = orig_cqe->raweth_qp1_metadata;
	if (orig_cqe->raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
		tpid = ((metadata &
			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
			CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
		if (tpid == ETH_P_8021Q) {
			*vid = metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
			*sl = (metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
			ret = true;
		}
	}
	return ret;
}

static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_re_qp *qp1_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	int nw_type;
	u32 tbl_idx;
	u16 vlan_id;
	u8 sl;

	tbl_idx = cqe->wr_id;

	sqp_entry = &rdev->sqp_tbl[tbl_idx];
	qp1_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	wc->qp = &qp1_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
		wc->vlan_id = vlan_id;
		wc->sl = sl;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	}
	wc->port_num = 1;
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if (nw_type >= 0) {
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}

static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			lib_qp->id, lib_qp->sq.hwq.prod,
			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

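/*
 * Poll loop below: drain HW CQEs (injecting a phantom WQE first if the
 * qplib SQ asked for one), top up from the software flush list when the
 * budget is not filled, then transcribe each qplib CQE into an ib_wc.
 */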
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	num_entries = budget;
	if (!cq->cql) {
		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					dev_err(rdev_to_dev(cq->rdev),
						"Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}
		if (ncqe < budget)
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);
		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of((struct bnxt_qplib_qp *)
					  (unsigned long)(cqe->qp_handle),
					  struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : bad QP handle");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
					cqe->status = -1;
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					if (cqe->status)
						continue;
					bnxt_re_process_res_shadow_qp_wc(qp, wc,
									 cqe);
					break;
				}
				bnxt_re_process_res_ud_wc(wc, cqe);
				break;
			default:
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : type 0x%x not handled",
					cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}

int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBR_DBR_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBR_DBR_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}

  2722. /* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u64 pbl = 0;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}
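/* Tear down an MR: release the HW MRW, any fast-register page list that was
 * attached by bnxt_re_alloc_mr(), and the pinned user memory backing it.
 */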
int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	if (!IS_ERR_OR_NULL(mr->ib_umem))
		ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}
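/* ib_sg_to_pages() walks the scatterlist and invokes bnxt_re_set_page() once
 * per page-sized chunk; the callback accumulates DMA addresses into
 * mr->pages, bounded by the fast-register page list size allocated in
 * bnxt_re_alloc_mr().
 */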
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}
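/* Allocate a fast-register MR. Only IB_MR_TYPE_MEM_REG is supported, and the
 * page list is capped at what a single-level PBL can address
 * (MAX_PBL_LVL_1_PGS entries).
 */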
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		/* The MRW was already allocated; release it on this path too */
		goto fail_mr;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr->pages);
	kfree(mr);
	return ERR_PTR(rc);
}
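/* Allocate a type 1 or type 2B memory window on the given PD; the window
 * type is passed straight through to the firmware ALLOCATE_MRW command.
 */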
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;
	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;
	atomic_inc(&rdev->mw_count);

	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}
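/* Destroy a memory window and drop the device MW count. */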
int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}
/* uverbs */
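/* Register a user MR: pin the user pages with ib_umem_get(), flatten the
 * resulting scatterlist into a physical buffer list (one u64 DMA address per
 * PAGE_SIZE page), and hand that PBL to the firmware. Hugetlb mappings and
 * non-native page shifts are rejected.
 */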
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl, *pbl_tbl_orig;
	int i, umem_pgs, pages, rc;
	struct scatterlist *sg;
	int entry;

	if (length > BNXT_RE_MAX_MR_SIZE) {
		dev_err(rdev_to_dev(rdev), "MR Size: %llu > Max supported: %ld\n",
			length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	umem = ib_umem_get(ib_pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "Failed to get umem");
		rc = -EFAULT;
		goto free_mr;
	}
	mr->ib_umem = umem;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
		goto release_umem;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;
	mr->qplib_mr.va = virt_addr;

	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!");
		rc = -EINVAL;
		goto free_mrw;
	}
	mr->qplib_mr.total_size = length;

	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_mrw;
	}
	pbl_tbl_orig = pbl_tbl;

	if (umem->hugetlb) {
		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
		rc = -EFAULT;
		goto fail;
	}
	if (umem->page_shift != PAGE_SHIFT) {
		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
		rc = -EFAULT;
		goto fail;
	}

	/* Map umem buf ptrs to the PBL */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> umem->page_shift;
		for (i = 0; i < pages; i++, pbl_tbl++)
			/* 64-bit shift avoids overflow on large SG entries */
			*pbl_tbl = sg_dma_address(sg) +
				   ((u64)i << umem->page_shift);
	}
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
			       umem_pgs, false);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
		goto fail;
	}

	kfree(pbl_tbl_orig);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail:
	kfree(pbl_tbl_orig);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
release_umem:
	ib_umem_release(umem);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}
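/* Allocate a per-process ucontext: reject ABI mismatches up front, set up a
 * shared page for driver/user communication, and return device limits
 * (QP count, page size, CQE size, CQ depth) to the user library via udata.
 */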
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_uctx_resp resp;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev),
			"ABI version requested %d is different from the device's %d",
			ibdev->uverbs_abi_ver, BNXT_RE_ABI_VERSION);
		return ERR_PTR(-EPERM);
	}

	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
	if (!uctx)
		return ERR_PTR(-ENOMEM);

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;
	resp.rsvd = 0;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return &uctx->ib_uctx;

cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	kfree(uctx);
	return ERR_PTR(rc);
}
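/* Release the ucontext: free the shared page and, if the context still owns
 * a doorbell page index (DPI), return it to the device's DPI table.
 */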
int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	int rc = 0;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
					    &rdev->qplib_res.dpi_tbl,
					    &uctx->dpi);
		if (rc)
			dev_err(rdev_to_dev(rdev),
				"Deallocate HW DPI failed!");
		/* Don't fail, continue */
		uctx->dpi.dbr = NULL;
	}

	kfree(uctx);
	return 0;
}
/* Helper function to mmap driver memory into a user application */
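/* The offset encodes what is being mapped: a non-zero vm_pgoff carries the
 * pfn of the doorbell page (DPI), which must be mapped uncached via
 * io_remap_pfn_range(); a zero vm_pgoff maps the kernel-allocated shared
 * page. Either way, exactly one PAGE_SIZE mapping is allowed.
 */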
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev),
				"Failed to map shared page");
			return -EAGAIN;
		}
	}

	return 0;
}