ib_verbs.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
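
/* Helpers to translate memory-access flags between the IB core encoding
 * and the bnxt_qplib encoding used by the device firmware interface.
 */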
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
};

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
};
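
/* Copy an IB SGE list into the qplib SGE layout; returns the total number
 * of bytes described by the list.
 */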
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}
/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct net_device *netdev = NULL;

	rcu_read_lock();
	if (rdev)
		netdev = rdev->netdev;
	if (netdev)
		dev_hold(netdev);

	rcu_read_unlock();
	return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modify the GUID requires the modification of the GID table */
		/* GUID should be made as READ-ONLY */
		break;
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* Node Desc should be made as READ-ONLY */
		break;
	default:
		break;
	}
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = 5;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = 3;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP |
				    IB_PORT_IP_BASED_GIDS;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */
	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx];
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->qp1_sqp) {
			dev_dbg(rdev_to_dev(rdev),
				"Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
			if (rc) {
				dev_err(rdev_to_dev(rdev),
					"Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}
int bnxt_re_add_gid(const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	if ((attr->ndev) && is_vlan_dev(attr->ndev))
		vlan_id = vlan_dev_vlan_id(attr->ndev);

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
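
/* Fence MR/MW support for kernel PDs: a small DMA-mapped buffer is
 * registered as an MR, a type-1 memory window is allocated over it, and a
 * bind WQE (flagged with UC_FENCE and SIGNAL_COMP) is pre-built so it can
 * be posted on a QP's send queue whenever a fenced work request requires it.
 */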
#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	dev_dbg(rdev_to_dev(qp->rdev),
		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}
static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		dev_err(rdev_to_dev(rdev),
			"Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}
/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	int rc;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
	}

	kfree(pd);
	return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
						      struct bnxt_re_ucontext,
						      ib_uctx);
	struct bnxt_re_pd *pd;
	int rc;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate the DPI here, in alloc_pd, so that
			 * ibv_devinfo and related applications do not fail
			 * once DPIs are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			dev_warn(rdev_to_dev(rdev),
				 "Failed to create Fence-MR\n");
	return &pd->ib_pd;
dbfail:
	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				    &pd->qplib_pd);
fail:
	kfree(pd);
	return ERR_PTR(rc);
}
/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	int rc;

	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
		return rc;
	}
	kfree(ah);
	return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
				struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	int rc;
	u8 nw_type;
	struct ib_gid_attr sgid_attr;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
		return ERR_PTR(-EINVAL);
	}
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	/*
	 * If RoCE V2 is enabled, the stack will have two entries for each
	 * GID. Avoid this duplicate entry in HW by dividing the GID index
	 * by 2 for RoCE V2.
	 */
	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
	if (ib_pd->uobject &&
	    !rdma_is_multicast_addr((struct in6_addr *)
				    grh->dgid.raw) &&
	    !rdma_link_local_addr((struct in6_addr *)
				  grh->dgid.raw)) {
		union ib_gid sgid;

		rc = ib_get_cached_gid(&rdev->ibdev, 1,
				       grh->sgid_index, &sgid,
				       &sgid_attr);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to query gid at index %d",
				grh->sgid_index);
			goto fail;
		}
		dev_put(sgid_attr.ndev);
		/* Get network header type for this GID */
		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
			break;
		default:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
			break;
		}
	}

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
		goto fail;
	}

	/* Write AVID to shared page. */
	if (ib_pd->uobject) {
		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
		struct bnxt_re_ucontext *uctx;
		unsigned long flag;
		u32 *wrptr;

		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return &ah->ib_ah;

fail:
	kfree(ah);
	return ERR_PTR(rc);
}
int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}
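
/* Take both the send and receive CQ locks of a QP. When the QP uses the
 * same CQ for both directions, only one lock is actually taken; the
 * __acquire/__release annotations keep sparse lock balancing happy.
 */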
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;
	unsigned int flags;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}

	flags = bnxt_re_lock_cqs(qp);
	bnxt_qplib_clean_qp(&qp->qplib_qp);
	bnxt_re_unlock_cqs(qp, flags);
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy HW AH for shadow QP");
			return rc;
		}

		bnxt_qplib_clean_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy Shadow QP");
			return rc;
		}
		mutex_lock(&rdev->qp_lock);
		list_del(&rdev->qp1_sqp->list);
		atomic_dec(&rdev->qp_count);
		mutex_unlock(&rdev->qp_lock);

		kfree(rdev->sqp_ah);
		kfree(rdev->qp1_sqp);
		rdev->qp1_sqp = NULL;
		rdev->sqp_ah = NULL;
	}

	if (!IS_ERR_OR_NULL(qp->rumem))
		ib_umem_release(qp->rumem);
	if (!IS_ERR_OR_NULL(qp->sumem))
		ib_umem_release(qp->sumem);

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	atomic_dec(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	kfree(qp);
	return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_re_qp_req ureq;
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct ib_umem *umem;
	int bytes = 0;
	struct ib_ucontext *context = pd->ib_pd.uobject->context;
	struct bnxt_re_ucontext *cntx = container_of(context,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(context, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sglist = umem->sg_head.sgl;
	qplib_qp->sq.nmap = umem->nmap;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(context, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sglist = umem->sg_head.sgl;
		qplib_qp->rq.nmap = umem->nmap;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	qplib_qp->sq.sglist = NULL;
	qplib_qp->sq.nmap = 0;

	return PTR_ERR(umem);
}
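
/* QP1 (GSI) support: the driver creates an internal "shadow" UD QP and a
 * companion AH that mirror the QP1 queue depths and reuse its CQs; the two
 * helpers below build that shadow AH and shadow QP from the QP1 attributes.
 */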
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	rdev->sqp_id = qp->qplib_qp.id;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	struct bnxt_re_srq *srq;
	int rc, entries;

	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->rdev = rdev;
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
	if (qp->qplib_qp.type == IB_QPT_MAX) {
		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
			qp->qplib_qp.type);
		rc = -EINVAL;
		goto fail;
	}
	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
				  IB_SIGNAL_ALL_WR) ? true : false);

	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

	if (qp_init_attr->send_cq) {
		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (qp_init_attr->recv_cq) {
		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	if (qp_init_attr->srq) {
		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
				   ib_srq);
		if (!srq) {
			dev_err(rdev_to_dev(rdev), "SRQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.srq = &srq->qplib_srq;
		qp->qplib_qp.rq.max_wqe = 0;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);

		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						qp_init_attr->cap.max_recv_wr;

		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
	}

	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

	if (qp_init_attr->qp_type == IB_QPT_GSI) {
		/* Allocate 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_init_attr->cap.max_send_wr;
		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		qp->qplib_qp.sq.max_sge++;
		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

		qp->qplib_qp.rq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

		qp->qplib_qp.sq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
		qp->qplib_qp.dpi = &rdev->dpi_privileged;
		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
			goto fail;
		}
		/* Create a shadow QP to handle the QP1 traffic */
		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
							 &qp->qplib_qp);
		if (!rdev->qp1_sqp) {
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create Shadow QP for QP1");
			goto qp_destroy;
		}
		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
							   &qp->qplib_qp);
		if (!rdev->sqp_ah) {
			bnxt_qplib_destroy_qp(&rdev->qplib_res,
					      &rdev->qp1_sqp->qplib_qp);
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create AH entry for ShadowQP");
			goto qp_destroy;
		}

	} else {
		/* Allocate 128 + 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes +
						BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

		/*
		 * Reserving one slot for the phantom WQE. The application can
		 * post one extra entry in this case, but allowing it avoids an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;

		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
		if (udata) {
			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
			if (rc)
				goto fail;
		} else {
			qp->qplib_qp.dpi = &rdev->dpi_privileged;
		}

		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
			goto free_umem;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	if (udata) {
		struct bnxt_re_qp_resp resp;

		resp.qpid = qp->ib_qp.qp_num;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
			goto qp_destroy;
		}
	}
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	if (udata) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}
fail:
	kfree(qp);
	return ERR_PTR(rc);
}
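
/* Translation helpers between the IB core enums and the device CMDQ/CREQ
 * encodings for QP state and path MTU.
 */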
static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}
  1171. /* Shared Receive Queues */
  1172. int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
  1173. {
  1174. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1175. ib_srq);
  1176. struct bnxt_re_dev *rdev = srq->rdev;
  1177. struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
  1178. struct bnxt_qplib_nq *nq = NULL;
  1179. int rc;
  1180. if (qplib_srq->cq)
  1181. nq = qplib_srq->cq->nq;
  1182. rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
  1183. if (rc) {
  1184. dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
  1185. return rc;
  1186. }
  1187. if (srq->umem)
  1188. ib_umem_release(srq->umem);
  1189. kfree(srq);
  1190. atomic_dec(&rdev->srq_count);
  1191. if (nq)
  1192. nq->budget--;
  1193. return 0;
  1194. }
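/* Map the user-space SRQ ring: pin the pages supplied by the library
 * via ib_umem_get() and hand the resulting scatterlist and the user
 * context's DPI over to the qplib SRQ.
 */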
  1195. static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
  1196. struct bnxt_re_pd *pd,
  1197. struct bnxt_re_srq *srq,
  1198. struct ib_udata *udata)
  1199. {
  1200. struct bnxt_re_srq_req ureq;
  1201. struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
  1202. struct ib_umem *umem;
  1203. int bytes = 0;
  1204. struct ib_ucontext *context = pd->ib_pd.uobject->context;
  1205. struct bnxt_re_ucontext *cntx = container_of(context,
  1206. struct bnxt_re_ucontext,
  1207. ib_uctx);
  1208. if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
  1209. return -EFAULT;
  1210. bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
  1211. bytes = PAGE_ALIGN(bytes);
  1212. umem = ib_umem_get(context, ureq.srqva, bytes,
  1213. IB_ACCESS_LOCAL_WRITE, 1);
  1214. if (IS_ERR(umem))
  1215. return PTR_ERR(umem);
  1216. srq->umem = umem;
  1217. qplib_srq->nmap = umem->nmap;
  1218. qplib_srq->sglist = umem->sg_head.sgl;
  1219. qplib_srq->srq_handle = ureq.srq_handle;
  1220. qplib_srq->dpi = &cntx->dpi;
  1221. return 0;
  1222. }
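/* Create an SRQ. Only the basic SRQ type is supported; one extra WQE is
 * allocated so that posting the advertised maximum never makes the ring
 * look empty, and the SRQ is bound to NQ 0 for notifications.
 */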
  1223. struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
  1224. struct ib_srq_init_attr *srq_init_attr,
  1225. struct ib_udata *udata)
  1226. {
  1227. struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
  1228. struct bnxt_re_dev *rdev = pd->rdev;
  1229. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  1230. struct bnxt_re_srq *srq;
  1231. struct bnxt_qplib_nq *nq = NULL;
  1232. int rc, entries;
  1233. if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1234. dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
  1235. rc = -EINVAL;
  1236. goto exit;
  1237. }
  1238. if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
  1239. rc = -ENOTSUPP;
  1240. goto exit;
  1241. }
  1242. srq = kzalloc(sizeof(*srq), GFP_KERNEL);
  1243. if (!srq) {
  1244. rc = -ENOMEM;
  1245. goto exit;
  1246. }
  1247. srq->rdev = rdev;
  1248. srq->qplib_srq.pd = &pd->qplib_pd;
  1249. srq->qplib_srq.dpi = &rdev->dpi_privileged;
1250. /* Allocate 1 more than requested so that posting the maximum
1251. * number of entries does not make the queue appear empty
1252. */
  1253. entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
  1254. if (entries > dev_attr->max_srq_wqes + 1)
  1255. entries = dev_attr->max_srq_wqes + 1;
  1256. srq->qplib_srq.max_wqe = entries;
  1257. srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
  1258. srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
  1259. srq->srq_limit = srq_init_attr->attr.srq_limit;
  1260. srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
  1261. nq = &rdev->nq[0];
  1262. if (udata) {
  1263. rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
  1264. if (rc)
  1265. goto fail;
  1266. }
  1267. rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
  1268. if (rc) {
  1269. dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
  1270. goto fail;
  1271. }
  1272. if (udata) {
  1273. struct bnxt_re_srq_resp resp;
  1274. resp.srqid = srq->qplib_srq.id;
  1275. rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
  1276. if (rc) {
  1277. dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
  1278. bnxt_qplib_destroy_srq(&rdev->qplib_res,
  1279. &srq->qplib_srq);
  1280. goto exit;
  1281. }
  1282. }
  1283. if (nq)
  1284. nq->budget++;
  1285. atomic_inc(&rdev->srq_count);
  1286. return &srq->ib_srq;
  1287. fail:
  1288. if (srq->umem)
  1289. ib_umem_release(srq->umem);
  1290. kfree(srq);
  1291. exit:
  1292. return ERR_PTR(rc);
  1293. }
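/* Modify an SRQ. SRQ resize (IB_SRQ_MAX_WR) is not supported and is
 * silently ignored; only the limit (arm threshold) can be changed.
 */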
  1294. int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
  1295. enum ib_srq_attr_mask srq_attr_mask,
  1296. struct ib_udata *udata)
  1297. {
  1298. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1299. ib_srq);
  1300. struct bnxt_re_dev *rdev = srq->rdev;
  1301. int rc;
  1302. switch (srq_attr_mask) {
  1303. case IB_SRQ_MAX_WR:
  1304. /* SRQ resize is not supported */
  1305. break;
  1306. case IB_SRQ_LIMIT:
  1307. /* Change the SRQ threshold */
  1308. if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
  1309. return -EINVAL;
  1310. srq->qplib_srq.threshold = srq_attr->srq_limit;
  1311. rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
  1312. if (rc) {
  1313. dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
  1314. return rc;
  1315. }
  1316. /* On success, update the shadow */
  1317. srq->srq_limit = srq_attr->srq_limit;
1318. /* No need to build and send a response back via udata */
  1319. break;
  1320. default:
  1321. dev_err(rdev_to_dev(rdev),
  1322. "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
  1323. return -EINVAL;
  1324. }
  1325. return 0;
  1326. }
  1327. int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
  1328. {
  1329. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1330. ib_srq);
  1331. struct bnxt_re_srq tsrq;
  1332. struct bnxt_re_dev *rdev = srq->rdev;
  1333. int rc;
  1334. /* Get live SRQ attr */
  1335. tsrq.qplib_srq.id = srq->qplib_srq.id;
  1336. rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
  1337. if (rc) {
  1338. dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
  1339. return rc;
  1340. }
  1341. srq_attr->max_wr = srq->qplib_srq.max_wqe;
  1342. srq_attr->max_sge = srq->qplib_srq.max_sge;
  1343. srq_attr->srq_limit = tsrq.qplib_srq.threshold;
  1344. return 0;
  1345. }
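/* Post a chain of receive WRs to the SRQ under the SRQ lock, stopping
 * at the first WQE the hardware queue rejects.
 */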
  1346. int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
  1347. struct ib_recv_wr **bad_wr)
  1348. {
  1349. struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
  1350. ib_srq);
  1351. struct bnxt_qplib_swqe wqe;
  1352. unsigned long flags;
  1353. int rc = 0;
  1354. spin_lock_irqsave(&srq->lock, flags);
  1355. while (wr) {
  1356. /* Transcribe each ib_recv_wr to qplib_swqe */
  1357. wqe.num_sge = wr->num_sge;
  1358. bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
  1359. wqe.wr_id = wr->wr_id;
  1360. wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
  1361. rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
  1362. if (rc) {
  1363. *bad_wr = wr;
  1364. break;
  1365. }
  1366. wr = wr->next;
  1367. }
  1368. spin_unlock_irqrestore(&srq->lock, flags);
  1369. return rc;
  1370. }
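/* Propagate selected attribute changes (state, P_Key index, Q_Key and
 * SQ PSN) from the GSI QP to its shadow QP so the two stay in sync.
 */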
  1371. static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
  1372. struct bnxt_re_qp *qp1_qp,
  1373. int qp_attr_mask)
  1374. {
  1375. struct bnxt_re_qp *qp = rdev->qp1_sqp;
  1376. int rc = 0;
  1377. if (qp_attr_mask & IB_QP_STATE) {
  1378. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
  1379. qp->qplib_qp.state = qp1_qp->qplib_qp.state;
  1380. }
  1381. if (qp_attr_mask & IB_QP_PKEY_INDEX) {
  1382. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
  1383. qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
  1384. }
  1385. if (qp_attr_mask & IB_QP_QKEY) {
  1386. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
  1387. /* Using a Random QKEY */
  1388. qp->qplib_qp.qkey = 0x81818181;
  1389. }
  1390. if (qp_attr_mask & IB_QP_SQ_PSN) {
  1391. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
  1392. qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
  1393. }
  1394. rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
  1395. if (rc)
  1396. dev_err(rdev_to_dev(rdev),
  1397. "Failed to modify Shadow QP for QP1");
  1398. return rc;
  1399. }
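/* Translate the verbs qp_attr/qp_attr_mask into the device's modify-QP
 * command: each IB_QP_* mask bit sets the matching CMDQ modify flag and
 * copies the attribute into the qplib QP before issuing the command.
 */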
  1400. int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
  1401. int qp_attr_mask, struct ib_udata *udata)
  1402. {
  1403. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  1404. struct bnxt_re_dev *rdev = qp->rdev;
  1405. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  1406. enum ib_qp_state curr_qp_state, new_qp_state;
  1407. int rc, entries;
  1408. int status;
  1409. union ib_gid sgid;
  1410. struct ib_gid_attr sgid_attr;
  1411. unsigned int flags;
  1412. u8 nw_type;
  1413. qp->qplib_qp.modify_flags = 0;
  1414. if (qp_attr_mask & IB_QP_STATE) {
  1415. curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
  1416. new_qp_state = qp_attr->qp_state;
  1417. if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
  1418. ib_qp->qp_type, qp_attr_mask,
  1419. IB_LINK_LAYER_ETHERNET)) {
  1420. dev_err(rdev_to_dev(rdev),
  1421. "Invalid attribute mask: %#x specified ",
  1422. qp_attr_mask);
  1423. dev_err(rdev_to_dev(rdev),
  1424. "for qpn: %#x type: %#x",
  1425. ib_qp->qp_num, ib_qp->qp_type);
  1426. dev_err(rdev_to_dev(rdev),
  1427. "curr_qp_state=0x%x, new_qp_state=0x%x\n",
  1428. curr_qp_state, new_qp_state);
  1429. return -EINVAL;
  1430. }
  1431. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
  1432. qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
  1433. if (!qp->sumem &&
  1434. qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
  1435. dev_dbg(rdev_to_dev(rdev),
  1436. "Move QP = %p to flush list\n",
  1437. qp);
  1438. flags = bnxt_re_lock_cqs(qp);
  1439. bnxt_qplib_add_flush_qp(&qp->qplib_qp);
  1440. bnxt_re_unlock_cqs(qp, flags);
  1441. }
  1442. if (!qp->sumem &&
  1443. qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
  1444. dev_dbg(rdev_to_dev(rdev),
  1445. "Move QP = %p out of flush list\n",
  1446. qp);
  1447. flags = bnxt_re_lock_cqs(qp);
  1448. bnxt_qplib_clean_qp(&qp->qplib_qp);
  1449. bnxt_re_unlock_cqs(qp, flags);
  1450. }
  1451. }
  1452. if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
  1453. qp->qplib_qp.modify_flags |=
  1454. CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
  1455. qp->qplib_qp.en_sqd_async_notify = true;
  1456. }
  1457. if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
  1458. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
  1459. qp->qplib_qp.access =
  1460. __from_ib_access_flags(qp_attr->qp_access_flags);
  1461. /* LOCAL_WRITE access must be set to allow RC receive */
  1462. qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
  1463. }
  1464. if (qp_attr_mask & IB_QP_PKEY_INDEX) {
  1465. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
  1466. qp->qplib_qp.pkey_index = qp_attr->pkey_index;
  1467. }
  1468. if (qp_attr_mask & IB_QP_QKEY) {
  1469. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
  1470. qp->qplib_qp.qkey = qp_attr->qkey;
  1471. }
  1472. if (qp_attr_mask & IB_QP_AV) {
  1473. const struct ib_global_route *grh =
  1474. rdma_ah_read_grh(&qp_attr->ah_attr);
  1475. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
  1476. CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
  1477. CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
  1478. CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
  1479. CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
  1480. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
  1481. CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
  1482. memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
  1483. sizeof(qp->qplib_qp.ah.dgid.data));
  1484. qp->qplib_qp.ah.flow_label = grh->flow_label;
1485. /* If RoCE V2 is enabled, the stack will have two entries for
1486. * each GID entry. Avoid this duplicate entry in HW by dividing
1487. * the GID index by 2 for RoCE V2
1488. */
  1489. qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
  1490. qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
  1491. qp->qplib_qp.ah.hop_limit = grh->hop_limit;
  1492. qp->qplib_qp.ah.traffic_class = grh->traffic_class;
  1493. qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
  1494. ether_addr_copy(qp->qplib_qp.ah.dmac,
  1495. qp_attr->ah_attr.roce.dmac);
  1496. status = ib_get_cached_gid(&rdev->ibdev, 1,
  1497. grh->sgid_index,
  1498. &sgid, &sgid_attr);
  1499. if (!status) {
  1500. memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
  1501. ETH_ALEN);
  1502. dev_put(sgid_attr.ndev);
  1503. nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
  1504. &sgid);
  1505. switch (nw_type) {
  1506. case RDMA_NETWORK_IPV4:
  1507. qp->qplib_qp.nw_type =
  1508. CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
  1509. break;
  1510. case RDMA_NETWORK_IPV6:
  1511. qp->qplib_qp.nw_type =
  1512. CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
  1513. break;
  1514. default:
  1515. qp->qplib_qp.nw_type =
  1516. CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
  1517. break;
  1518. }
  1519. }
  1520. }
  1521. if (qp_attr_mask & IB_QP_PATH_MTU) {
  1522. qp->qplib_qp.modify_flags |=
  1523. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
  1524. qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
  1525. qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
  1526. } else if (qp_attr->qp_state == IB_QPS_RTR) {
  1527. qp->qplib_qp.modify_flags |=
  1528. CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
  1529. qp->qplib_qp.path_mtu =
  1530. __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
  1531. qp->qplib_qp.mtu =
  1532. ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
  1533. }
  1534. if (qp_attr_mask & IB_QP_TIMEOUT) {
  1535. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
  1536. qp->qplib_qp.timeout = qp_attr->timeout;
  1537. }
  1538. if (qp_attr_mask & IB_QP_RETRY_CNT) {
  1539. qp->qplib_qp.modify_flags |=
  1540. CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
  1541. qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
  1542. }
  1543. if (qp_attr_mask & IB_QP_RNR_RETRY) {
  1544. qp->qplib_qp.modify_flags |=
  1545. CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
  1546. qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
  1547. }
  1548. if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
  1549. qp->qplib_qp.modify_flags |=
  1550. CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
  1551. qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
  1552. }
  1553. if (qp_attr_mask & IB_QP_RQ_PSN) {
  1554. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
  1555. qp->qplib_qp.rq.psn = qp_attr->rq_psn;
  1556. }
  1557. if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
  1558. qp->qplib_qp.modify_flags |=
  1559. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
  1560. /* Cap the max_rd_atomic to device max */
  1561. qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
  1562. dev_attr->max_qp_rd_atom);
  1563. }
  1564. if (qp_attr_mask & IB_QP_SQ_PSN) {
  1565. qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
  1566. qp->qplib_qp.sq.psn = qp_attr->sq_psn;
  1567. }
  1568. if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
  1569. if (qp_attr->max_dest_rd_atomic >
  1570. dev_attr->max_qp_init_rd_atom) {
  1571. dev_err(rdev_to_dev(rdev),
  1572. "max_dest_rd_atomic requested%d is > dev_max%d",
  1573. qp_attr->max_dest_rd_atomic,
  1574. dev_attr->max_qp_init_rd_atom);
  1575. return -EINVAL;
  1576. }
  1577. qp->qplib_qp.modify_flags |=
  1578. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
  1579. qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
  1580. }
  1581. if (qp_attr_mask & IB_QP_CAP) {
  1582. qp->qplib_qp.modify_flags |=
  1583. CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
  1584. CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
  1585. CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
  1586. CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
  1587. CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
  1588. if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
  1589. (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
  1590. (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
  1591. (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
  1592. (qp_attr->cap.max_inline_data >=
  1593. dev_attr->max_inline_data)) {
  1594. dev_err(rdev_to_dev(rdev),
  1595. "Create QP failed - max exceeded");
  1596. return -EINVAL;
  1597. }
  1598. entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
  1599. qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
  1600. dev_attr->max_qp_wqes + 1);
  1601. qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
  1602. qp_attr->cap.max_send_wr;
1603. /*
1604. * Reserve one slot for the phantom WQE. Some applications can
1605. * post one extra entry in this case; allow it to avoid an
1606. * unexpected queue-full condition
1607. */
  1608. qp->qplib_qp.sq.q_full_delta -= 1;
  1609. qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
  1610. if (qp->qplib_qp.rq.max_wqe) {
  1611. entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
  1612. qp->qplib_qp.rq.max_wqe =
  1613. min_t(u32, entries, dev_attr->max_qp_wqes + 1);
  1614. qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
  1615. qp_attr->cap.max_recv_wr;
  1616. qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
  1617. } else {
  1618. /* SRQ was used prior, just ignore the RQ caps */
  1619. }
  1620. }
  1621. if (qp_attr_mask & IB_QP_DEST_QPN) {
  1622. qp->qplib_qp.modify_flags |=
  1623. CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
  1624. qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
  1625. }
  1626. rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
  1627. if (rc) {
  1628. dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
  1629. return rc;
  1630. }
  1631. if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
  1632. rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
  1633. return rc;
  1634. }
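/* Query the QP from firmware into a temporary qplib_qp and convert the
 * result back into ib_qp_attr/ib_qp_init_attr for the caller.
 */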
  1635. int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
  1636. int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
  1637. {
  1638. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  1639. struct bnxt_re_dev *rdev = qp->rdev;
  1640. struct bnxt_qplib_qp *qplib_qp;
  1641. int rc;
  1642. qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
  1643. if (!qplib_qp)
  1644. return -ENOMEM;
  1645. qplib_qp->id = qp->qplib_qp.id;
  1646. qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
  1647. rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
  1648. if (rc) {
  1649. dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
  1650. goto out;
  1651. }
  1652. qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
  1653. qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
  1654. qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
  1655. qp_attr->pkey_index = qplib_qp->pkey_index;
  1656. qp_attr->qkey = qplib_qp->qkey;
  1657. qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
  1658. rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
  1659. qplib_qp->ah.host_sgid_index,
  1660. qplib_qp->ah.hop_limit,
  1661. qplib_qp->ah.traffic_class);
  1662. rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
  1663. rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
  1664. ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
  1665. qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
  1666. qp_attr->timeout = qplib_qp->timeout;
  1667. qp_attr->retry_cnt = qplib_qp->retry_cnt;
  1668. qp_attr->rnr_retry = qplib_qp->rnr_retry;
  1669. qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
  1670. qp_attr->rq_psn = qplib_qp->rq.psn;
  1671. qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
  1672. qp_attr->sq_psn = qplib_qp->sq.psn;
  1673. qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
  1674. qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
  1675. IB_SIGNAL_REQ_WR;
  1676. qp_attr->dest_qp_num = qplib_qp->dest_qpn;
  1677. qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
  1678. qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
  1679. qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
  1680. qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
  1681. qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
  1682. qp_init_attr->cap = qp_attr->cap;
  1683. out:
  1684. kfree(qplib_qp);
  1685. return rc;
  1686. }
1687. /* Routine for sending QP1 packets for RoCE V1 and V2
1688. */
  1689. static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
  1690. struct ib_send_wr *wr,
  1691. struct bnxt_qplib_swqe *wqe,
  1692. int payload_size)
  1693. {
  1694. struct ib_device *ibdev = &qp->rdev->ibdev;
  1695. struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
  1696. ib_ah);
  1697. struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
  1698. struct bnxt_qplib_sge sge;
  1699. union ib_gid sgid;
  1700. u8 nw_type;
  1701. u16 ether_type;
  1702. struct ib_gid_attr sgid_attr;
  1703. union ib_gid dgid;
  1704. bool is_eth = false;
  1705. bool is_vlan = false;
  1706. bool is_grh = false;
  1707. bool is_udp = false;
  1708. u8 ip_version = 0;
  1709. u16 vlan_id = 0xFFFF;
  1710. void *buf;
  1711. int i, rc = 0;
  1712. memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
  1713. rc = ib_get_cached_gid(ibdev, 1,
  1714. qplib_ah->host_sgid_index, &sgid,
  1715. &sgid_attr);
  1716. if (rc) {
  1717. dev_err(rdev_to_dev(qp->rdev),
  1718. "Failed to query gid at index %d",
  1719. qplib_ah->host_sgid_index);
  1720. return rc;
  1721. }
  1722. if (sgid_attr.ndev) {
  1723. if (is_vlan_dev(sgid_attr.ndev))
  1724. vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
  1725. dev_put(sgid_attr.ndev);
  1726. }
  1727. /* Get network header type for this GID */
  1728. nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
  1729. switch (nw_type) {
  1730. case RDMA_NETWORK_IPV4:
  1731. nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
  1732. break;
  1733. case RDMA_NETWORK_IPV6:
  1734. nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
  1735. break;
  1736. default:
  1737. nw_type = BNXT_RE_ROCE_V1_PACKET;
  1738. break;
  1739. }
  1740. memcpy(&dgid.raw, &qplib_ah->dgid, 16);
  1741. is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
  1742. if (is_udp) {
  1743. if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
  1744. ip_version = 4;
  1745. ether_type = ETH_P_IP;
  1746. } else {
  1747. ip_version = 6;
  1748. ether_type = ETH_P_IPV6;
  1749. }
  1750. is_grh = false;
  1751. } else {
  1752. ether_type = ETH_P_IBOE;
  1753. is_grh = true;
  1754. }
  1755. is_eth = true;
  1756. is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
  1757. ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
  1758. ip_version, is_udp, 0, &qp->qp1_hdr);
  1759. /* ETH */
  1760. ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
  1761. ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
  1762. /* For vlan, check the sgid for vlan existence */
  1763. if (!is_vlan) {
  1764. qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
  1765. } else {
  1766. qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
  1767. qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
  1768. }
  1769. if (is_grh || (ip_version == 6)) {
  1770. memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
  1771. memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
  1772. sizeof(sgid));
  1773. qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
  1774. }
  1775. if (ip_version == 4) {
  1776. qp->qp1_hdr.ip4.tos = 0;
  1777. qp->qp1_hdr.ip4.id = 0;
  1778. qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
  1779. qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
  1780. memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
  1781. memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
  1782. qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
  1783. }
  1784. if (is_udp) {
  1785. qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
  1786. qp->qp1_hdr.udp.sport = htons(0x8CD1);
  1787. qp->qp1_hdr.udp.csum = 0;
  1788. }
  1789. /* BTH */
  1790. if (wr->opcode == IB_WR_SEND_WITH_IMM) {
  1791. qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
  1792. qp->qp1_hdr.immediate_present = 1;
  1793. } else {
  1794. qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
  1795. }
  1796. if (wr->send_flags & IB_SEND_SOLICITED)
  1797. qp->qp1_hdr.bth.solicited_event = 1;
  1798. /* pad_count */
  1799. qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
  1800. /* P_key for QP1 is for all members */
  1801. qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
  1802. qp->qp1_hdr.bth.destination_qpn = IB_QP1;
  1803. qp->qp1_hdr.bth.ack_req = 0;
  1804. qp->send_psn++;
  1805. qp->send_psn &= BTH_PSN_MASK;
  1806. qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
  1807. /* DETH */
1808. /* Use the privileged Q_Key for QP1 */
  1809. qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
  1810. qp->qp1_hdr.deth.source_qpn = IB_QP1;
  1811. /* Pack the QP1 to the transmit buffer */
  1812. buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
  1813. if (buf) {
  1814. ib_ud_header_pack(&qp->qp1_hdr, buf);
  1815. for (i = wqe->num_sge; i; i--) {
  1816. wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
  1817. wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
  1818. wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
  1819. }
  1820. /*
  1821. * Max Header buf size for IPV6 RoCE V2 is 86,
  1822. * which is same as the QP1 SQ header buffer.
  1823. * Header buf size for IPV4 RoCE V2 can be 66.
  1824. * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
  1825. * Subtract 20 bytes from QP1 SQ header buf size
  1826. */
  1827. if (is_udp && ip_version == 4)
  1828. sge.size -= 20;
  1829. /*
  1830. * Max Header buf size for RoCE V1 is 78.
  1831. * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
  1832. * Subtract 8 bytes from QP1 SQ header buf size
  1833. */
  1834. if (!is_udp)
  1835. sge.size -= 8;
  1836. /* Subtract 4 bytes for non vlan packets */
  1837. if (!is_vlan)
  1838. sge.size -= 4;
  1839. wqe->sg_list[0].addr = sge.addr;
  1840. wqe->sg_list[0].lkey = sge.lkey;
  1841. wqe->sg_list[0].size = sge.size;
  1842. wqe->num_sge++;
  1843. } else {
  1844. dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
  1845. rc = -ENOMEM;
  1846. }
  1847. return rc;
  1848. }
1849. /* The MAD layer only provides a recv SGE the size of
1850. * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
1851. * nor RoCE iCRC. The Cu+ solution must provide a buffer for the
1852. * entire receive packet (334 bytes) with no VLAN and then copy the
1853. * GRH and the MAD datagram out to the provided SGE.
1854. */
  1855. static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
  1856. struct ib_recv_wr *wr,
  1857. struct bnxt_qplib_swqe *wqe,
  1858. int payload_size)
  1859. {
  1860. struct bnxt_qplib_sge ref, sge;
  1861. u32 rq_prod_index;
  1862. struct bnxt_re_sqp_entries *sqp_entry;
  1863. rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
  1864. if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
  1865. return -ENOMEM;
  1866. /* Create 1 SGE to receive the entire
  1867. * ethernet packet
  1868. */
  1869. /* Save the reference from ULP */
  1870. ref.addr = wqe->sg_list[0].addr;
  1871. ref.lkey = wqe->sg_list[0].lkey;
  1872. ref.size = wqe->sg_list[0].size;
  1873. sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
  1874. /* SGE 1 */
  1875. wqe->sg_list[0].addr = sge.addr;
  1876. wqe->sg_list[0].lkey = sge.lkey;
  1877. wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
  1878. sge.size -= wqe->sg_list[0].size;
  1879. sqp_entry->sge.addr = ref.addr;
  1880. sqp_entry->sge.lkey = ref.lkey;
  1881. sqp_entry->sge.size = ref.size;
  1882. /* Store the wrid for reporting completion */
  1883. sqp_entry->wrid = wqe->wr_id;
  1884. /* change the wqe->wrid to table index */
  1885. wqe->wr_id = rq_prod_index;
  1886. return 0;
  1887. }
  1888. static int is_ud_qp(struct bnxt_re_qp *qp)
  1889. {
  1890. return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
  1891. }
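/* Build a plain send WQE. For UD QPs the AH id, remote QPN and Q_Key
 * are taken from the ud_wr; the send flags map onto qplib WQE flags.
 */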
  1892. static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
  1893. struct ib_send_wr *wr,
  1894. struct bnxt_qplib_swqe *wqe)
  1895. {
  1896. struct bnxt_re_ah *ah = NULL;
  1897. if (is_ud_qp(qp)) {
  1898. ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
  1899. wqe->send.q_key = ud_wr(wr)->remote_qkey;
  1900. wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
  1901. wqe->send.avid = ah->qplib_ah.id;
  1902. }
  1903. switch (wr->opcode) {
  1904. case IB_WR_SEND:
  1905. wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
  1906. break;
  1907. case IB_WR_SEND_WITH_IMM:
  1908. wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
  1909. wqe->send.imm_data = wr->ex.imm_data;
  1910. break;
  1911. case IB_WR_SEND_WITH_INV:
  1912. wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
  1913. wqe->send.inv_key = wr->ex.invalidate_rkey;
  1914. break;
  1915. default:
  1916. return -EINVAL;
  1917. }
  1918. if (wr->send_flags & IB_SEND_SIGNALED)
  1919. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1920. if (wr->send_flags & IB_SEND_FENCE)
  1921. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1922. if (wr->send_flags & IB_SEND_SOLICITED)
  1923. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1924. if (wr->send_flags & IB_SEND_INLINE)
  1925. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
  1926. return 0;
  1927. }
  1928. static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
  1929. struct bnxt_qplib_swqe *wqe)
  1930. {
  1931. switch (wr->opcode) {
  1932. case IB_WR_RDMA_WRITE:
  1933. wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
  1934. break;
  1935. case IB_WR_RDMA_WRITE_WITH_IMM:
  1936. wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
  1937. wqe->rdma.imm_data = wr->ex.imm_data;
  1938. break;
  1939. case IB_WR_RDMA_READ:
  1940. wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
  1941. wqe->rdma.inv_key = wr->ex.invalidate_rkey;
  1942. break;
  1943. default:
  1944. return -EINVAL;
  1945. }
  1946. wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
  1947. wqe->rdma.r_key = rdma_wr(wr)->rkey;
  1948. if (wr->send_flags & IB_SEND_SIGNALED)
  1949. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1950. if (wr->send_flags & IB_SEND_FENCE)
  1951. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1952. if (wr->send_flags & IB_SEND_SOLICITED)
  1953. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1954. if (wr->send_flags & IB_SEND_INLINE)
  1955. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
  1956. return 0;
  1957. }
  1958. static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
  1959. struct bnxt_qplib_swqe *wqe)
  1960. {
  1961. switch (wr->opcode) {
  1962. case IB_WR_ATOMIC_CMP_AND_SWP:
  1963. wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
  1964. wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
  1965. wqe->atomic.swap_data = atomic_wr(wr)->swap;
  1966. break;
  1967. case IB_WR_ATOMIC_FETCH_AND_ADD:
  1968. wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
  1969. wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
  1970. break;
  1971. default:
  1972. return -EINVAL;
  1973. }
  1974. wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
  1975. wqe->atomic.r_key = atomic_wr(wr)->rkey;
  1976. if (wr->send_flags & IB_SEND_SIGNALED)
  1977. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1978. if (wr->send_flags & IB_SEND_FENCE)
  1979. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1980. if (wr->send_flags & IB_SEND_SOLICITED)
  1981. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1982. return 0;
  1983. }
  1984. static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
  1985. struct bnxt_qplib_swqe *wqe)
  1986. {
  1987. wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
  1988. wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
  1989. /* Need unconditional fence for local invalidate
  1990. * opcode to work as expected.
  1991. */
  1992. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  1993. if (wr->send_flags & IB_SEND_SIGNALED)
  1994. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  1995. if (wr->send_flags & IB_SEND_SOLICITED)
  1996. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
  1997. return 0;
  1998. }
  1999. static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
  2000. struct bnxt_qplib_swqe *wqe)
  2001. {
  2002. struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
  2003. struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
  2004. int access = wr->access;
  2005. wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
  2006. wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
  2007. wqe->frmr.page_list = mr->pages;
  2008. wqe->frmr.page_list_len = mr->npages;
  2009. wqe->frmr.levels = qplib_frpl->hwq.level + 1;
  2010. wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
  2011. /* Need unconditional fence for reg_mr
  2012. * opcode to function as expected.
  2013. */
  2014. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
  2015. if (wr->wr.send_flags & IB_SEND_SIGNALED)
  2016. wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
  2017. if (access & IB_ACCESS_LOCAL_WRITE)
  2018. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
  2019. if (access & IB_ACCESS_REMOTE_READ)
  2020. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
  2021. if (access & IB_ACCESS_REMOTE_WRITE)
  2022. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
  2023. if (access & IB_ACCESS_REMOTE_ATOMIC)
  2024. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
  2025. if (access & IB_ACCESS_MW_BIND)
  2026. wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
  2027. wqe->frmr.l_key = wr->key;
  2028. wqe->frmr.length = wr->mr->length;
  2029. wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
  2030. wqe->frmr.va = wr->mr->iova;
  2031. return 0;
  2032. }
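/* Copy inline payload from the WR's SGEs directly into the WQE's inline
 * data area, failing if the total exceeds the supported inline length.
 */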
  2033. static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
  2034. struct ib_send_wr *wr,
  2035. struct bnxt_qplib_swqe *wqe)
  2036. {
  2037. /* Copy the inline data to the data field */
  2038. u8 *in_data;
  2039. u32 i, sge_len;
  2040. void *sge_addr;
  2041. in_data = wqe->inline_data;
  2042. for (i = 0; i < wr->num_sge; i++) {
  2043. sge_addr = (void *)(unsigned long)
  2044. wr->sg_list[i].addr;
  2045. sge_len = wr->sg_list[i].length;
  2046. if ((sge_len + wqe->inline_len) >
  2047. BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
  2048. dev_err(rdev_to_dev(rdev),
  2049. "Inline data size requested > supported value");
  2050. return -EINVAL;
  2051. }
  2052. sge_len = wr->sg_list[i].length;
  2053. memcpy(in_data, sge_addr, sge_len);
  2054. in_data += wr->sg_list[i].length;
  2055. wqe->inline_len += wr->sg_list[i].length;
  2056. }
  2057. return wqe->inline_len;
  2058. }
  2059. static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
  2060. struct ib_send_wr *wr,
  2061. struct bnxt_qplib_swqe *wqe)
  2062. {
  2063. int payload_sz = 0;
  2064. if (wr->send_flags & IB_SEND_INLINE)
  2065. payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
  2066. else
  2067. payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
  2068. wqe->num_sge);
  2069. return payload_sz;
  2070. }
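/* Workaround for a UD/GSI/raw-Ethertype QP stall: once the posted WQE
 * count reaches BNXT_RE_UD_QP_HW_STALL, nudge the QP by re-issuing a
 * modify to RTS and reset the counter.
 */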
  2071. static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
  2072. {
  2073. if ((qp->ib_qp.qp_type == IB_QPT_UD ||
  2074. qp->ib_qp.qp_type == IB_QPT_GSI ||
  2075. qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
  2076. qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
  2077. int qp_attr_mask;
  2078. struct ib_qp_attr qp_attr;
  2079. qp_attr_mask = IB_QP_STATE;
  2080. qp_attr.qp_state = IB_QPS_RTS;
  2081. bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
  2082. qp->qplib_qp.wqe_cnt = 0;
  2083. }
  2084. }
  2085. static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
  2086. struct bnxt_re_qp *qp,
  2087. struct ib_send_wr *wr)
  2088. {
  2089. struct bnxt_qplib_swqe wqe;
  2090. int rc = 0, payload_sz = 0;
  2091. unsigned long flags;
  2092. spin_lock_irqsave(&qp->sq_lock, flags);
  2093. memset(&wqe, 0, sizeof(wqe));
  2094. while (wr) {
  2095. /* House keeping */
  2096. memset(&wqe, 0, sizeof(wqe));
  2097. /* Common */
  2098. wqe.num_sge = wr->num_sge;
  2099. if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
  2100. dev_err(rdev_to_dev(rdev),
  2101. "Limit exceeded for Send SGEs");
  2102. rc = -EINVAL;
  2103. goto bad;
  2104. }
  2105. payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
  2106. if (payload_sz < 0) {
  2107. rc = -EINVAL;
  2108. goto bad;
  2109. }
  2110. wqe.wr_id = wr->wr_id;
  2111. wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
  2112. rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
  2113. if (!rc)
  2114. rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
  2115. bad:
  2116. if (rc) {
  2117. dev_err(rdev_to_dev(rdev),
  2118. "Post send failed opcode = %#x rc = %d",
  2119. wr->opcode, rc);
  2120. break;
  2121. }
  2122. wr = wr->next;
  2123. }
  2124. bnxt_qplib_post_send_db(&qp->qplib_qp);
  2125. bnxt_ud_qp_hw_stall_workaround(qp);
  2126. spin_unlock_irqrestore(&qp->sq_lock, flags);
  2127. return rc;
  2128. }
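/* Post a chain of send WRs. Each WR is validated against the SQ SGE
 * limit and converted to a qplib SWQE according to its opcode; the
 * doorbell is rung once after the loop completes or breaks out.
 */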
  2129. int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
  2130. struct ib_send_wr **bad_wr)
  2131. {
  2132. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  2133. struct bnxt_qplib_swqe wqe;
  2134. int rc = 0, payload_sz = 0;
  2135. unsigned long flags;
  2136. spin_lock_irqsave(&qp->sq_lock, flags);
  2137. while (wr) {
  2138. /* House keeping */
  2139. memset(&wqe, 0, sizeof(wqe));
  2140. /* Common */
  2141. wqe.num_sge = wr->num_sge;
  2142. if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
  2143. dev_err(rdev_to_dev(qp->rdev),
  2144. "Limit exceeded for Send SGEs");
  2145. rc = -EINVAL;
  2146. goto bad;
  2147. }
  2148. payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
  2149. if (payload_sz < 0) {
  2150. rc = -EINVAL;
  2151. goto bad;
  2152. }
  2153. wqe.wr_id = wr->wr_id;
  2154. switch (wr->opcode) {
  2155. case IB_WR_SEND:
  2156. case IB_WR_SEND_WITH_IMM:
  2157. if (ib_qp->qp_type == IB_QPT_GSI) {
  2158. rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
  2159. payload_sz);
  2160. if (rc)
  2161. goto bad;
  2162. wqe.rawqp1.lflags |=
  2163. SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
  2164. }
  2165. switch (wr->send_flags) {
  2166. case IB_SEND_IP_CSUM:
  2167. wqe.rawqp1.lflags |=
  2168. SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
  2169. break;
  2170. default:
  2171. break;
  2172. }
2173. /* Fall through to build the wqe */
  2174. case IB_WR_SEND_WITH_INV:
  2175. rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
  2176. break;
  2177. case IB_WR_RDMA_WRITE:
  2178. case IB_WR_RDMA_WRITE_WITH_IMM:
  2179. case IB_WR_RDMA_READ:
  2180. rc = bnxt_re_build_rdma_wqe(wr, &wqe);
  2181. break;
  2182. case IB_WR_ATOMIC_CMP_AND_SWP:
  2183. case IB_WR_ATOMIC_FETCH_AND_ADD:
  2184. rc = bnxt_re_build_atomic_wqe(wr, &wqe);
  2185. break;
  2186. case IB_WR_RDMA_READ_WITH_INV:
  2187. dev_err(rdev_to_dev(qp->rdev),
  2188. "RDMA Read with Invalidate is not supported");
  2189. rc = -EINVAL;
  2190. goto bad;
  2191. case IB_WR_LOCAL_INV:
  2192. rc = bnxt_re_build_inv_wqe(wr, &wqe);
  2193. break;
  2194. case IB_WR_REG_MR:
  2195. rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
  2196. break;
  2197. default:
  2198. /* Unsupported WRs */
  2199. dev_err(rdev_to_dev(qp->rdev),
  2200. "WR (%#x) is not supported", wr->opcode);
  2201. rc = -EINVAL;
  2202. goto bad;
  2203. }
  2204. if (!rc)
  2205. rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
  2206. bad:
  2207. if (rc) {
  2208. dev_err(rdev_to_dev(qp->rdev),
  2209. "post_send failed op:%#x qps = %#x rc = %d\n",
  2210. wr->opcode, qp->qplib_qp.state, rc);
  2211. *bad_wr = wr;
  2212. break;
  2213. }
  2214. wr = wr->next;
  2215. }
  2216. bnxt_qplib_post_send_db(&qp->qplib_qp);
  2217. bnxt_ud_qp_hw_stall_workaround(qp);
  2218. spin_unlock_irqrestore(&qp->sq_lock, flags);
  2219. return rc;
  2220. }
  2221. static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
  2222. struct bnxt_re_qp *qp,
  2223. struct ib_recv_wr *wr)
  2224. {
  2225. struct bnxt_qplib_swqe wqe;
  2226. int rc = 0;
  2227. memset(&wqe, 0, sizeof(wqe));
  2228. while (wr) {
  2229. /* House keeping */
  2230. memset(&wqe, 0, sizeof(wqe));
  2231. /* Common */
  2232. wqe.num_sge = wr->num_sge;
  2233. if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
  2234. dev_err(rdev_to_dev(rdev),
  2235. "Limit exceeded for Receive SGEs");
  2236. rc = -EINVAL;
  2237. break;
  2238. }
  2239. bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
  2240. wqe.wr_id = wr->wr_id;
  2241. wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
  2242. rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
  2243. if (rc)
  2244. break;
  2245. wr = wr->next;
  2246. }
  2247. if (!rc)
  2248. bnxt_qplib_post_recv_db(&qp->qplib_qp);
  2249. return rc;
  2250. }
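/* Post a chain of receive WRs. GSI QPs get the QP1 shadow-RQ handling
 * per WQE, and the doorbell is rung every BNXT_RE_RQ_WQE_THRESHOLD
 * entries as well as once at the end.
 */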
  2251. int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
  2252. struct ib_recv_wr **bad_wr)
  2253. {
  2254. struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
  2255. struct bnxt_qplib_swqe wqe;
  2256. int rc = 0, payload_sz = 0;
  2257. unsigned long flags;
  2258. u32 count = 0;
  2259. spin_lock_irqsave(&qp->rq_lock, flags);
  2260. while (wr) {
  2261. /* House keeping */
  2262. memset(&wqe, 0, sizeof(wqe));
  2263. /* Common */
  2264. wqe.num_sge = wr->num_sge;
  2265. if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
  2266. dev_err(rdev_to_dev(qp->rdev),
  2267. "Limit exceeded for Receive SGEs");
  2268. rc = -EINVAL;
  2269. *bad_wr = wr;
  2270. break;
  2271. }
  2272. payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
  2273. wr->num_sge);
  2274. wqe.wr_id = wr->wr_id;
  2275. wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
  2276. if (ib_qp->qp_type == IB_QPT_GSI)
  2277. rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
  2278. payload_sz);
  2279. if (!rc)
  2280. rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
  2281. if (rc) {
  2282. *bad_wr = wr;
  2283. break;
  2284. }
2285. /* Ring the DB if the number of RQEs posted reaches the threshold */
  2286. if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
  2287. bnxt_qplib_post_recv_db(&qp->qplib_qp);
  2288. count = 0;
  2289. }
  2290. wr = wr->next;
  2291. }
  2292. if (count)
  2293. bnxt_qplib_post_recv_db(&qp->qplib_qp);
  2294. spin_unlock_irqrestore(&qp->rq_lock, flags);
  2295. return rc;
  2296. }
  2297. /* Completion Queues */
  2298. int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
  2299. {
  2300. int rc;
  2301. struct bnxt_re_cq *cq;
  2302. struct bnxt_qplib_nq *nq;
  2303. struct bnxt_re_dev *rdev;
  2304. cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
  2305. rdev = cq->rdev;
  2306. nq = cq->qplib_cq.nq;
  2307. rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
  2308. if (rc) {
  2309. dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
  2310. return rc;
  2311. }
  2312. if (!IS_ERR_OR_NULL(cq->umem))
  2313. ib_umem_release(cq->umem);
  2314. atomic_dec(&rdev->cq_count);
  2315. nq->budget--;
  2316. kfree(cq->cql);
  2317. kfree(cq);
  2318. return 0;
  2319. }
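/* Create a CQ. User CQs are backed by a pinned umem supplied through
 * udata; kernel CQs get a local CQE staging buffer (cql). CQs are
 * spread across the NQs in a round-robin fashion.
 */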
  2320. struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
  2321. const struct ib_cq_init_attr *attr,
  2322. struct ib_ucontext *context,
  2323. struct ib_udata *udata)
  2324. {
  2325. struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
  2326. struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
  2327. struct bnxt_re_cq *cq = NULL;
  2328. int rc, entries;
  2329. int cqe = attr->cqe;
  2330. struct bnxt_qplib_nq *nq = NULL;
  2331. unsigned int nq_alloc_cnt;
  2332. /* Validate CQ fields */
  2333. if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2334. dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
  2335. return ERR_PTR(-EINVAL);
  2336. }
  2337. cq = kzalloc(sizeof(*cq), GFP_KERNEL);
  2338. if (!cq)
  2339. return ERR_PTR(-ENOMEM);
  2340. cq->rdev = rdev;
  2341. cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
  2342. entries = roundup_pow_of_two(cqe + 1);
  2343. if (entries > dev_attr->max_cq_wqes + 1)
  2344. entries = dev_attr->max_cq_wqes + 1;
  2345. if (context) {
  2346. struct bnxt_re_cq_req req;
  2347. struct bnxt_re_ucontext *uctx = container_of
  2348. (context,
  2349. struct bnxt_re_ucontext,
  2350. ib_uctx);
  2351. if (ib_copy_from_udata(&req, udata, sizeof(req))) {
  2352. rc = -EFAULT;
  2353. goto fail;
  2354. }
  2355. cq->umem = ib_umem_get(context, req.cq_va,
  2356. entries * sizeof(struct cq_base),
  2357. IB_ACCESS_LOCAL_WRITE, 1);
  2358. if (IS_ERR(cq->umem)) {
  2359. rc = PTR_ERR(cq->umem);
  2360. goto fail;
  2361. }
  2362. cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
  2363. cq->qplib_cq.nmap = cq->umem->nmap;
  2364. cq->qplib_cq.dpi = &uctx->dpi;
  2365. } else {
  2366. cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
  2367. cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
  2368. GFP_KERNEL);
  2369. if (!cq->cql) {
  2370. rc = -ENOMEM;
  2371. goto fail;
  2372. }
  2373. cq->qplib_cq.dpi = &rdev->dpi_privileged;
  2374. cq->qplib_cq.sghead = NULL;
  2375. cq->qplib_cq.nmap = 0;
  2376. }
2377. /*
2378. * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2379. * used for getting the NQ index.
2380. */
  2381. nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
  2382. nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
  2383. cq->qplib_cq.max_wqe = entries;
  2384. cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
  2385. cq->qplib_cq.nq = nq;
  2386. rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
  2387. if (rc) {
  2388. dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
  2389. goto fail;
  2390. }
  2391. cq->ib_cq.cqe = entries;
  2392. cq->cq_period = cq->qplib_cq.period;
  2393. nq->budget++;
  2394. atomic_inc(&rdev->cq_count);
  2395. if (context) {
  2396. struct bnxt_re_cq_resp resp;
  2397. resp.cqid = cq->qplib_cq.id;
  2398. resp.tail = cq->qplib_cq.hwq.cons;
  2399. resp.phase = cq->qplib_cq.period;
  2400. resp.rsvd = 0;
  2401. rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
  2402. if (rc) {
  2403. dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
  2404. bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
  2405. goto c2fail;
  2406. }
  2407. }
  2408. return &cq->ib_cq;
  2409. c2fail:
  2410. if (context)
  2411. ib_umem_release(cq->umem);
  2412. fail:
  2413. kfree(cq->cql);
  2414. kfree(cq);
  2415. return ERR_PTR(rc);
  2416. }
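/* Helpers that map the device's requester, raw-QP1 and responder
 * completion status codes onto IB work-completion status values.
 */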
  2417. static u8 __req_to_ib_wc_status(u8 qstatus)
  2418. {
  2419. switch (qstatus) {
  2420. case CQ_REQ_STATUS_OK:
  2421. return IB_WC_SUCCESS;
  2422. case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
  2423. return IB_WC_BAD_RESP_ERR;
  2424. case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
  2425. return IB_WC_LOC_LEN_ERR;
  2426. case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
  2427. return IB_WC_LOC_QP_OP_ERR;
  2428. case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
  2429. return IB_WC_LOC_PROT_ERR;
  2430. case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
  2431. return IB_WC_GENERAL_ERR;
  2432. case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
  2433. return IB_WC_REM_INV_REQ_ERR;
  2434. case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
  2435. return IB_WC_REM_ACCESS_ERR;
  2436. case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
  2437. return IB_WC_REM_OP_ERR;
  2438. case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
  2439. return IB_WC_RNR_RETRY_EXC_ERR;
  2440. case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
  2441. return IB_WC_RETRY_EXC_ERR;
  2442. case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
  2443. return IB_WC_WR_FLUSH_ERR;
  2444. default:
  2445. return IB_WC_GENERAL_ERR;
  2446. }
  2447. return 0;
  2448. }
  2449. static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
  2450. {
  2451. switch (qstatus) {
  2452. case CQ_RES_RAWETH_QP1_STATUS_OK:
  2453. return IB_WC_SUCCESS;
  2454. case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
  2455. return IB_WC_LOC_ACCESS_ERR;
  2456. case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
  2457. return IB_WC_LOC_LEN_ERR;
  2458. case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
  2459. return IB_WC_LOC_PROT_ERR;
  2460. case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
  2461. return IB_WC_LOC_QP_OP_ERR;
  2462. case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
  2463. return IB_WC_GENERAL_ERR;
  2464. case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
  2465. return IB_WC_WR_FLUSH_ERR;
  2466. case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
  2467. return IB_WC_WR_FLUSH_ERR;
  2468. default:
  2469. return IB_WC_GENERAL_ERR;
  2470. }
  2471. }
  2472. static u8 __rc_to_ib_wc_status(u8 qstatus)
  2473. {
  2474. switch (qstatus) {
  2475. case CQ_RES_RC_STATUS_OK:
  2476. return IB_WC_SUCCESS;
  2477. case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
  2478. return IB_WC_LOC_ACCESS_ERR;
  2479. case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
  2480. return IB_WC_LOC_LEN_ERR;
  2481. case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
  2482. return IB_WC_LOC_PROT_ERR;
  2483. case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
  2484. return IB_WC_LOC_QP_OP_ERR;
  2485. case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
  2486. return IB_WC_GENERAL_ERR;
  2487. case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
  2488. return IB_WC_REM_INV_REQ_ERR;
  2489. case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
  2490. return IB_WC_WR_FLUSH_ERR;
  2491. case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
  2492. return IB_WC_WR_FLUSH_ERR;
  2493. default:
  2494. return IB_WC_GENERAL_ERR;
  2495. }
  2496. }
  2497. static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
  2498. {
  2499. switch (cqe->type) {
  2500. case BNXT_QPLIB_SWQE_TYPE_SEND:
  2501. wc->opcode = IB_WC_SEND;
  2502. break;
  2503. case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
  2504. wc->opcode = IB_WC_SEND;
  2505. wc->wc_flags |= IB_WC_WITH_IMM;
  2506. break;
  2507. case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
  2508. wc->opcode = IB_WC_SEND;
  2509. wc->wc_flags |= IB_WC_WITH_INVALIDATE;
  2510. break;
  2511. case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
  2512. wc->opcode = IB_WC_RDMA_WRITE;
  2513. break;
  2514. case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
  2515. wc->opcode = IB_WC_RDMA_WRITE;
  2516. wc->wc_flags |= IB_WC_WITH_IMM;
  2517. break;
  2518. case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
  2519. wc->opcode = IB_WC_RDMA_READ;
  2520. break;
  2521. case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
  2522. wc->opcode = IB_WC_COMP_SWAP;
  2523. break;
  2524. case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
  2525. wc->opcode = IB_WC_FETCH_ADD;
  2526. break;
  2527. case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
  2528. wc->opcode = IB_WC_LOCAL_INV;
  2529. break;
  2530. case BNXT_QPLIB_SWQE_TYPE_REG_MR:
  2531. wc->opcode = IB_WC_REG_MR;
  2532. break;
  2533. default:
  2534. wc->opcode = IB_WC_SEND;
  2535. break;
  2536. }
  2537. wc->status = __req_to_ib_wc_status(cqe->status);
  2538. }
  2539. static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
  2540. u16 raweth_qp1_flags2)
  2541. {
  2542. bool is_ipv6 = false, is_ipv4 = false;
2543. /* Bits 9-6 of raweth_qp1_flags indicate the itype */
  2544. if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
  2545. != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
  2546. return -1;
  2547. if (raweth_qp1_flags2 &
  2548. CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
  2549. raweth_qp1_flags2 &
  2550. CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2551. /* Bit 8 of raweth_qp1_flags2 indicates ip_type. 0 - v4, 1 - v6 */
  2552. (raweth_qp1_flags2 &
  2553. CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
  2554. (is_ipv6 = true) : (is_ipv4 = true);
  2555. return ((is_ipv6) ?
  2556. BNXT_RE_ROCEV2_IPV6_PACKET :
  2557. BNXT_RE_ROCEV2_IPV4_PACKET);
  2558. } else {
  2559. return BNXT_RE_ROCE_V1_PACKET;
  2560. }
  2561. }
  2562. static int bnxt_re_to_ib_nw_type(int nw_type)
  2563. {
  2564. u8 nw_hdr_type = 0xFF;
  2565. switch (nw_type) {
  2566. case BNXT_RE_ROCE_V1_PACKET:
  2567. nw_hdr_type = RDMA_NETWORK_ROCE_V1;
  2568. break;
  2569. case BNXT_RE_ROCEV2_IPV4_PACKET:
  2570. nw_hdr_type = RDMA_NETWORK_IPV4;
  2571. break;
  2572. case BNXT_RE_ROCEV2_IPV6_PACKET:
  2573. nw_hdr_type = RDMA_NETWORK_IPV6;
  2574. break;
  2575. }
  2576. return nw_hdr_type;
  2577. }
  2578. static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
  2579. void *rq_hdr_buf)
  2580. {
  2581. u8 *tmp_buf = NULL;
  2582. struct ethhdr *eth_hdr;
  2583. u16 eth_type;
  2584. bool rc = false;
  2585. tmp_buf = (u8 *)rq_hdr_buf;
2586. /*
2587. * If the destination MAC is not the same as the interface MAC,
2588. * this could be a loopback or multicast address; check whether
2589. * it is a loopback packet
2590. */
  2591. if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
  2592. tmp_buf += 4;
  2593. /* Check the ether type */
  2594. eth_hdr = (struct ethhdr *)tmp_buf;
  2595. eth_type = ntohs(eth_hdr->h_proto);
  2596. switch (eth_type) {
  2597. case ETH_P_IBOE:
  2598. rc = true;
  2599. break;
  2600. case ETH_P_IP:
  2601. case ETH_P_IPV6: {
  2602. u32 len;
  2603. struct udphdr *udp_hdr;
  2604. len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
  2605. sizeof(struct ipv6hdr));
  2606. tmp_buf += sizeof(struct ethhdr) + len;
  2607. udp_hdr = (struct udphdr *)tmp_buf;
  2608. if (ntohs(udp_hdr->dest) ==
  2609. ROCE_V2_UDP_DPORT)
  2610. rc = true;
  2611. break;
  2612. }
  2613. default:
  2614. break;
  2615. }
  2616. }
  2617. return rc;
  2618. }
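/* Handle a raw QP1 receive: save the CQE in the SQP table, post fresh
 * receive buffers on the shadow QP, and loop the packet back to it with
 * a UD send so the MAD layer sees it as a regular GSI completion.
 */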
  2619. static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
  2620. struct bnxt_qplib_cqe *cqe)
  2621. {
  2622. struct bnxt_re_dev *rdev = qp1_qp->rdev;
  2623. struct bnxt_re_sqp_entries *sqp_entry = NULL;
  2624. struct bnxt_re_qp *qp = rdev->qp1_sqp;
  2625. struct ib_send_wr *swr;
  2626. struct ib_ud_wr udwr;
  2627. struct ib_recv_wr rwr;
  2628. int pkt_type = 0;
  2629. u32 tbl_idx;
  2630. void *rq_hdr_buf;
  2631. dma_addr_t rq_hdr_buf_map;
  2632. dma_addr_t shrq_hdr_buf_map;
  2633. u32 offset = 0;
  2634. u32 skip_bytes = 0;
  2635. struct ib_sge s_sge[2];
  2636. struct ib_sge r_sge[2];
  2637. int rc;
  2638. memset(&udwr, 0, sizeof(udwr));
  2639. memset(&rwr, 0, sizeof(rwr));
  2640. memset(&s_sge, 0, sizeof(s_sge));
  2641. memset(&r_sge, 0, sizeof(r_sge));
  2642. swr = &udwr.wr;
  2643. tbl_idx = cqe->wr_id;
  2644. rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
  2645. (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
  2646. rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
  2647. tbl_idx);
  2648. /* Shadow QP header buffer */
  2649. shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
  2650. tbl_idx);
  2651. sqp_entry = &rdev->sqp_tbl[tbl_idx];
  2652. /* Store this cqe */
  2653. memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
  2654. sqp_entry->qp1_qp = qp1_qp;
  2655. /* Find packet type from the cqe */
  2656. pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
  2657. cqe->raweth_qp1_flags2);
  2658. if (pkt_type < 0) {
  2659. dev_err(rdev_to_dev(rdev), "Invalid packet\n");
  2660. return -EINVAL;
  2661. }
  2662. /* Adjust the offset for the user buffer and post in the rq */
  2663. if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
  2664. offset = 20;
  2665. /*
  2666. * QP1 loopback packet has 4 bytes of internal header before
  2667. * ether header. Skip these four bytes.
  2668. */
  2669. if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
  2670. skip_bytes = 4;
2671. /* First send SGE. Skip the ether header */
  2672. s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
  2673. + skip_bytes;
  2674. s_sge[0].lkey = 0xFFFFFFFF;
  2675. s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
  2676. BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
  2677. /* Second Send SGE */
  2678. s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
  2679. BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
  2680. if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
  2681. s_sge[1].addr += 8;
  2682. s_sge[1].lkey = 0xFFFFFFFF;
  2683. s_sge[1].length = 256;
  2684. /* First recv SGE */
  2685. r_sge[0].addr = shrq_hdr_buf_map;
  2686. r_sge[0].lkey = 0xFFFFFFFF;
  2687. r_sge[0].length = 40;
  2688. r_sge[1].addr = sqp_entry->sge.addr + offset;
  2689. r_sge[1].lkey = sqp_entry->sge.lkey;
  2690. r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
  2691. /* Create receive work request */
  2692. rwr.num_sge = 2;
  2693. rwr.sg_list = r_sge;
  2694. rwr.wr_id = tbl_idx;
  2695. rwr.next = NULL;
  2696. rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
  2697. if (rc) {
  2698. dev_err(rdev_to_dev(rdev),
  2699. "Failed to post Rx buffers to shadow QP");
  2700. return -ENOMEM;
  2701. }
  2702. swr->num_sge = 2;
  2703. swr->sg_list = s_sge;
  2704. swr->wr_id = tbl_idx;
  2705. swr->opcode = IB_WR_SEND;
  2706. swr->next = NULL;
  2707. udwr.ah = &rdev->sqp_ah->ib_ah;
  2708. udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
  2709. udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
  2710. /* post data received in the send queue */
  2711. rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
  2712. return 0;
  2713. }
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}

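/*
 * Extract the VLAN ID and priority from the raw-Ethernet QP1 CQE metadata.
 * Returns true only when the metadata format flag indicates a VLAN tag and
 * the TPID is 802.1Q.
 */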
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
				u16 *vid, u8 *sl)
{
	bool ret = false;
	u32 metadata;
	u16 tpid;

	metadata = orig_cqe->raweth_qp1_metadata;
	if (orig_cqe->raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
		tpid = ((metadata &
			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
			CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
		if (tpid == ETH_P_8021Q) {
			*vid = metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
			*sl = (metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
			      CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
			ret = true;
		}
	}

	return ret;
}

static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

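/*
 * A receive completion on the shadow GSI QP is reported to the consumer as
 * if it had arrived on the original QP1: the CQE stashed in rdev->sqp_tbl
 * when the packet was re-posted supplies the wr_id, length, source QP and
 * network header type.
 */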
static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_re_qp *qp1_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	int nw_type;
	u32 tbl_idx;
	u16 vlan_id;
	u8 sl;

	tbl_idx = cqe->wr_id;

	sqp_entry = &rdev->sqp_tbl[tbl_idx];
	qp1_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	wc->qp = &qp1_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
		wc->vlan_id = vlan_id;
		wc->sl = sl;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	}
	wc->port_num = 1;
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if (nw_type >= 0) {
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}

static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

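/*
 * Post a fence/bind-MW work request on the send queue under the SQ lock.
 * The CQ poll path calls this when the qplib layer has flagged
 * sq->send_phantom; on success the phantom WQE count is bumped.
 */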
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			lib_qp->id, lib_qp->sq.hwq.prod,
			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

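/*
 * Poll the CQ: drain up to min(num_entries, max_cql) qplib CQEs (plus any
 * entries on the flush list), translate each one into an ib_wc, and give
 * raw QP1 and shadow-QP completions their special handling.
 */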
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	num_entries = budget;
	if (!cq->cql) {
		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					dev_err(rdev_to_dev(cq->rdev),
						"Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}
		if (ncqe < budget)
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of
				((struct bnxt_qplib_qp *)
				 (unsigned long)(cqe->qp_handle),
				 struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : bad QP handle");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
					cqe->status = -1;
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					if (cqe->status) {
						continue;
					} else {
						bnxt_re_process_res_shadow_qp_wc
								(qp, wc, cqe);
						break;
					}
				}
				bnxt_re_process_res_ud_wc(wc, cqe);
				break;
			default:
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : type 0x%x not handled",
					cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}

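/*
 * Arm the CQ for the next (or next solicited) completion. When
 * IB_CQ_REPORT_MISSED_EVENTS is requested and the CQ is not empty,
 * return 1 so the caller knows to poll again.
 */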
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBR_DBR_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBR_DBR_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}

/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u64 pbl = 0;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
			       PAGE_SIZE);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}

int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	if (!IS_ERR_OR_NULL(mr->ib_umem))
		ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}

static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}

struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto bail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
}

struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}

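/* Accept only the page shifts enumerated for the REGISTER_MR firmware command. */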
static int bnxt_re_page_size_ok(int page_shift)
{
	switch (page_shift) {
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
		return 1;
	default:
		return 0;
	}
}

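/*
 * Walk the umem scatterlist and build the PBL: the first entry is the
 * page-masked start address, subsequent entries are the DMA addresses
 * aligned to the (1 << page_shift) hardware page size. Returns the number
 * of entries written.
 */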
static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
			     int page_shift)
{
	u64 *pbl_tbl = pbl_tbl_orig;
	u64 paddr;
	u64 page_mask = (1ULL << page_shift) - 1;
	int i, pages;
	struct scatterlist *sg;
	int entry;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> PAGE_SHIFT;
		for (i = 0; i < pages; i++) {
			paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
			if (pbl_tbl == pbl_tbl_orig)
				*pbl_tbl++ = paddr & ~page_mask;
			else if ((paddr & page_mask) == 0)
				*pbl_tbl++ = paddr;
		}
	}
	return pbl_tbl - pbl_tbl_orig;
}

/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl = NULL;
	int umem_pgs, page_shift, rc;

	if (length > BNXT_RE_MAX_MR_SIZE) {
		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
			length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
		goto free_mr;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	umem = ib_umem_get(ib_pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "Failed to get umem");
		rc = -EFAULT;
		goto free_mrw;
	}
	mr->ib_umem = umem;

	mr->qplib_mr.va = virt_addr;
	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!");
		rc = -EINVAL;
		goto free_umem;
	}
	mr->qplib_mr.total_size = length;

	/* Each PBL entry is a u64 DMA address */
	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_umem;
	}

	page_shift = umem->page_shift;

	if (!bnxt_re_page_size_ok(page_shift)) {
		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
		rc = -EFAULT;
		goto fail;
	}

	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
		rc = -EINVAL;
		goto fail;
	}
	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
		page_shift = BNXT_RE_PAGE_SHIFT_2M;
		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
			 1 << page_shift);
	}

	/* Map umem buf ptrs to the PBL */
	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
			       umem_pgs, false, 1 << page_shift);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
		goto fail;
	}

	kfree(pbl_tbl);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;
fail:
	kfree(pbl_tbl);
free_umem:
	ib_umem_release(umem);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}

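/*
 * Allocate a user context: reject mismatched uverbs ABI versions, allocate
 * the shared page that user space later mmaps (vm_pgoff == 0 in
 * bnxt_re_mmap), and return device limits via bnxt_re_uctx_resp.
 */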
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_uctx_resp resp;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
		ibdev->uverbs_abi_ver);
	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
			BNXT_RE_ABI_VERSION);
		return ERR_PTR(-EPERM);
	}

	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
	if (!uctx)
		return ERR_PTR(-ENOMEM);

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;
	resp.rsvd = 0;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return &uctx->ib_uctx;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	kfree(uctx);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	int rc = 0;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
					    &rdev->qplib_res.dpi_tbl,
					    &uctx->dpi);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
			/* Don't fail, continue*/
		uctx->dpi.dbr = NULL;
	}

	kfree(uctx);
	return 0;
}

/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev),
				"Failed to map shared page");
			return -EAGAIN;
		}
	}

	return 0;
}