/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
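
/* Helpers that fill the scatter/gather and remote-address segments of a WQE. */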
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}
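
/*
 * Post a chain of send work requests.  GSI QPs are built as UD WQEs and
 * RC QPs as ctrl/raddr/data segments; a single software doorbell is rung
 * at the end for everything posted under the SQ lock.
 */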
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
								      wr->wr_id;

		/* Corresponding to the RC and RD type wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.sl_tclass_flowlabel &
				       HNS_ROCE_FLOW_LABEL_MASK);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_TCLASS_SHIFT);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));

			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/* Ctrl field, ctrl set type: sig, solic, imm, fence */
			/* SO wait for conforming application scenarios */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev,
						"inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}

				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/* sqe num is two */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Set DB return */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = le32_to_cpu(sq_db.u32_4);
		doorbell[1] = le32_to_cpu(sq_db.u32_8);

		hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
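
/*
 * Post a chain of receive work requests.  For GSI QPs the new RQ head is
 * written back through the QP1C config register instead of the normal
 * receive doorbell.
 */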
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = le32_to_cpu(rq_db.u32_4);
			doorbell[1] = le32_to_cpu(rq_db.u32_8);

			hns_roce_write64_k((__le32 *)doorbell,
					   hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
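
/*
 * Doorbell mode helpers: select event vs. poll mode and normal vs.
 * extended mode for the SQ and other-type doorbells, and program the
 * almost-empty/almost-full watermarks in the ROCEE registers.
 */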
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
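
/*
 * Program the extended SQ doorbell (SDB) region: thresholds, the
 * 4K-aligned base address split across two registers, and the depth.
 */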
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12.  The base address is shifted by 12 because the
	 * hardware takes a 4K-aligned page address, and by another 32 to
	 * extract the high 32 bits that are written to this register.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* High 32 bits of the 4K-aligned base, mirroring the SDB path above */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}
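
/*
 * Allocate the extended doorbell buffers (when the extend mode is
 * selected) and program either the extended or the normal SDB/ODB
 * registers accordingly.
 */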
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_SDB_SIZE,
						     &sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_ODB_SIZE,
						     &odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}
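
/*
 * Create one reserved loopback RC QP, used to flush outstanding work
 * when freeing an MR (see hns_roce_v1_dereg_mr() below).
 */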
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type		= IB_QPT_RC;
	init_attr.sq_sig_type		= IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr	= HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr	= HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}
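
/*
 * Reserve a CQ, a PD and one loopback QP per port/SL combination, and
 * drive each QP through RESET->INIT->RTR->RTS with itself as the
 * destination, so MR-free WQEs can later be posted on them.
 */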
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	u64 subnet_prefix;
	int attr_mask = 0;
	int i, j;
	int ret;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserved cq for loop qp */
	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector	= 0;
	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
	if (IS_ERR(cq)) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		return -ENOMEM;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device		= &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject		= NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler		= NULL;
	free_mr->mr_free_cq->ib_cq.event_handler	= NULL;
	free_mr->mr_free_cq->ib_cq.cq_context		= NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
	if (IS_ERR(pd)) {
		dev_err(dev, "Create pd for reserved loop qp failed!");
		ret = -ENOMEM;
		goto alloc_pd_failed;
	}
	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device	= &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject	= NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr	= NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags	= IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index		= 0;
	attr.min_rnr_timer	= 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic	= 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn		= 0x0808;
	attr.sq_psn		= 0x0808;
	attr.retry_cnt		= 7;
	attr.rnr_retry		= 7;
	attr.timeout		= 0x12;
	attr.path_mtu		= IB_MTU_256;
	attr.ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port		= port;
		hr_qp->phy_port		= phy_port;
		hr_qp->ibqp.qp_type	= IB_QPT_RC;
		hr_qp->ibqp.device	= &hr_dev->ib_dev;
		hr_qp->ibqp.uobject	= NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd		= pd;
		hr_qp->ibqp.recv_cq	= cq;
		hr_qp->ibqp.send_cq	= cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num		= port + 1;

		attr.dest_qp_num	= hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       MAC_ADDR_OCTET_NUM);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	if (hns_roce_dealloc_pd(pd))
		dev_err(dev, "Destroy pd for create_lp_qp failed!\n");

alloc_pd_failed:
	if (hns_roce_ib_destroy_cq(cq))
		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");

	return ret;
}
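
/* Tear down the reserved loopback QPs and their CQ and PD. */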
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
	if (ret)
		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);

	ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
	if (ret)
		dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}
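
/*
 * Work function: rebuild the reserved loopback QPs from scratch and
 * signal the waiter if it is still around (comp_flag set).
 */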
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
	  msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
	return -ETIMEDOUT;
}
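
/*
 * Post a zero-length RDMA write on a loopback QP; its completion is used
 * to tell when the hardware has drained work referencing the MR being
 * freed.
 */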
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next	= NULL;
	send_wr.num_sge	= 0;
	send_wr.send_flags = 0;
	send_wr.sg_list	= NULL;
	send_wr.wr_id	= (unsigned long long)&send_wr;
	send_wr.opcode	= IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}
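
/*
 * Work function for MR free: post one loopback WQE per reserved QP and
 * poll their completions (bounded by a timeout) before the MR is
 * finally released.
 */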
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}
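
/*
 * Destroy an MR: take the MPT back from hardware, then queue the
 * loopback-flush work above and wait (bounded) for it to complete
 * before freeing the PBL, the key and the umem.
 */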
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address, 48 bit, 4K aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. When writing the address to hardware, shift right
	 * by 12 because 4K pages are used, and by another 32 to obtain the
	 * high 32-bit value that is written to the register.
	 */
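	/*
	 * Worked example (illustrative values, not from the original
	 * source): for map = 0x0000_1234_5678_9000, map >> 12 is the 4K
	 * page frame number 0x1_2345_6789; roce_write() above takes its
	 * low 32 bits (0x2345_6789), and map >> 44 = 0x1 supplies the
	 * remaining high bits for the BA_H field set below.
	 */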
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}
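/*
 * Register-update idiom used throughout this file (explanatory note, not
 * from the original source): roce_read() returns a CPU-order u32, which
 * is rebadged as __le32 so that roce_set_field()/roce_set_bit() can mask
 * in the new field, then converted back and written with roce_write().
 * On a little-endian host the two conversions are no-ops; they exist to
 * keep the field helpers' __le32 typing consistent.
 */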
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.mtpt_buf.buf,
			  priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.qpc_buf.buf,
			  priv->bt_table.qpc_buf.map);

	return ret;
}
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.cqc_buf.buf,
			  priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.mtpt_buf.buf,
			  priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.qpc_buf.buf,
			  priv->bt_table.qpc_buf.map);
}
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for the CQ's tptr (tail pointer), also
	 * named ci (consumer index). Every CQ uses 2 bytes of it to save
	 * the cqe ci in hip06. Hardware reads this area to get the new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}
/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 *
 * Return: 0 on success, negative on failure.
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check whether this is a DT or an ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}
static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 1;
	des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
	if (!des_qp->qp_wq) {
		dev_err(dev, "Create destroy qp workqueue failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 0;
	flush_workqueue(des_qp->qp_wq);
	destroy_workqueue(des_qp->qp_wq);
}
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				 ((u64)roce_read(hr_dev,
						 ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GID entries in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_des_qp_init(hr_dev);
	if (ret) {
		dev_err(dev, "des qp init failed!\n");
		goto error_failed_des_qp_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_des_qp_free(hr_dev);

error_failed_des_qp_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}
static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);

	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}

static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}
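/*
 * Mailbox layout note (explanatory, derived from the code below): the HCR
 * is a block of six 32-bit registers at ROCEE_MB1_REG.  in_param occupies
 * words 0-1, out_param words 2-3, in_modifier word 4, and word 5 carries
 * the command/modifier/token/event fields plus the HW_RUN (go) bit.  The
 * wmb() orders the parameter writes before the final word-5 write, since
 * that write hands the command to hardware.
 */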
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();
	writel(val, hcr + 5);

	mmiowb();

	return 0;
}
static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			     __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
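/*
 * GID programming note (explanatory, not from the original source): each
 * 128-bit GID is split into four 32-bit words written to the L/ML/MH/H
 * register banks; gid_idx, computed by hns_get_gid_index(), selects the
 * per-index slot inside each bank.
 */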
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	return 0;
}
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	__le32 tmp;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the MAC address changes, loopback may fail because the smac
	 * no longer equals the dmac. The reserved loopback QPs need to be
	 * released and created again.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}
static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 *pages;
	int entry;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
	mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
	mpt_entry->length = cpu_to_le32((u32)mr->size);

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		pages[i] = ((u64)sg_dma_address(sg)) >> 12;

		/* The first 7 page addresses are recorded directly in the
		 * MTPT entry.
		 */
		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	/* Register user mr */
	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA0_H_M,
				       MPT_BYTE_36_PA0_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA1_H_M,
				       MPT_BYTE_40_PA1_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA2_H_M,
				       MPT_BYTE_44_PA2_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_48,
				       MPT_BYTE_48_PA3_H_M,
				       MPT_BYTE_48_PA3_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_8));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA4_H_M,
				       MPT_BYTE_56_PA4_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA5_H_M,
				       MPT_BYTE_60_PA5_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_64,
				       MPT_BYTE_64_PA6_H_M,
				       MPT_BYTE_64_PA6_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}
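/*
 * CQE ownership sketch (illustrative, not from the original source): with
 * ib_cq.cqe = depth - 1, (n & cqe) is the slot index and (n & (cqe + 1))
 * is the wrap-parity bit of the consumer index.  E.g. for a depth of 4,
 * get_sw_cqe() treats a CQE as valid when its owner bit is 1 for indices
 * 0-3 and 0 for indices 4-7: hardware flips the owner bit it writes on
 * each pass, so only entries completed on the current pass match.
 */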
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}

static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);

	/* The CQE belongs to software when its owner bit is the inverse of
	 * the MSB of cons_index.
	 */
	return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
}

static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	__le32 doorbell[2];

	doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
	doorbell[1] = 0;
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
}
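/*
 * Algorithm note for __hns_roce_v1_cq_clean() (explanatory, derived from
 * the code below): first scan forward from cons_index to locate the
 * producer position, then walk backwards compacting the queue - CQEs that
 * belong to the QP being destroyed are dropped, the rest are copied
 * nfreed slots towards the producer with the destination's owner bit
 * preserved so hardware ownership accounting stays intact.
 */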
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				    CQE_BYTE_16_LOCAL_QPN_S) &
		     HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* SRQ is not supported by the v1 engine */
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* Get the tptr for this CQ. */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

	/* Register cq_context members */
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);

	cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);

	cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
	/* Dedicated hardware, directly set 0 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
	/*
	 * 44 = 32 + 12. When writing the address to hardware, shift right
	 * by 12 because 4K pages are used, and by another 32 to obtain the
	 * high 32-bit value that is written to the register.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);

	cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);
	/* The initial value of cq's ci is 0 */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
}
static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}
static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	__le32 doorbell[2];

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
			    IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0, then notification_flag = 1, next event;
	 * flags = 1, then notification_flag = 0, solicited event only.
	 */
	doorbell[0] =
		cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	doorbell[1] = 0;
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
		       hr_cq->cqn | notification_flag);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);

	return 0;
}
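/*
 * Per-CQE poll flow (summary, not from the original source): fetch the
 * next software-owned CQE, resolve the QP it belongs to (UD CQEs need the
 * QPN recomputed from the port number), translate the hardware status to
 * an ib_wc status, then fill in opcode/flags and advance the SQ or RQ
 * tail to recover the wr_id.
 */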
static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	int qpn;
	int is_send;
	u16 wqe_ctr;
	u32 status;
	u32 opcode;
	struct hns_roce_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct hns_roce_wqe_ctrl_seg *sq_wqe;
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct device *dev = &hr_dev->pdev->dev;

	/* Find the CQE according to the consumer index */
	cqe = next_cqe_sw(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Memory barrier */
	rmb();
	/* 0->SQ, 1->RQ */
	is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));

	/* The local_qpn in a UD CQE is always 1, so the real qpn has to be
	 * computed.
	 */
	if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
			   CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
		qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
				     CQE_BYTE_20_PORT_NUM_S) +
		      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) *
		      HNS_ROCE_MAX_PORTS;
	} else {
		qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S);
	}

	if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
			return -EINVAL;
		}

		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	status = roce_get_field(cqe->cqe_byte_4,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
		 HNS_ROCE_CQE_STATUS_MASK;
	switch (status) {
	case HNS_ROCE_CQE_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* On a CQE status error, return directly */
	if (wc->status != IB_WC_SUCCESS)
		return 0;

	if (is_send) {
		/* This CQE corresponds to an SQ WQE */
		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
						CQE_BYTE_4_WQE_INDEX_M,
						CQE_BYTE_4_WQE_INDEX_S) &
				      ((*cur_qp)->sq.wqe_cnt - 1));
		switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
		case HNS_ROCE_WQE_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case HNS_ROCE_WQE_OPCODE_UD_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
		wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
				IB_WC_WITH_IMM : 0);

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sq_signal_bits is set, first update the tail
			 * pointer to the WQE that this CQE corresponds to.
			 */
			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
						      CQE_BYTE_4_WQE_INDEX_M,
						      CQE_BYTE_4_WQE_INDEX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
		/* This CQE corresponds to an RQ WQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
		opcode = roce_get_field(cqe->cqe_byte_4,
					CQE_BYTE_4_OPERATION_TYPE_M,
					CQE_BYTE_4_OPERATION_TYPE_S) &
			 HNS_ROCE_CQE_OPCODE_MASK;
		switch (opcode) {
		case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immediate_data));
			break;
		case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
			if (roce_get_bit(cqe->cqe_byte_4,
					 CQE_BYTE_4_IMM_INDICATOR_S)) {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = IB_WC_WITH_IMM;
				wc->ex.imm_data = cpu_to_be32(
					le32_to_cpu(cqe->immediate_data));
			} else {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = 0;
			}
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
		wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
					    CQE_BYTE_20_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
						CQE_BYTE_20_REMOTE_QPN_M,
						CQE_BYTE_20_REMOTE_QPN_S);
		wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
					      CQE_BYTE_20_GRH_PRESENT_S) ?
				 IB_WC_GRH : 0);
		wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
						     CQE_BYTE_28_P_KEY_IDX_M,
						     CQE_BYTE_28_P_KEY_IDX_S);
	}

	return 0;
}
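/*
 * Completion-polling note (explanatory, derived from the code below):
 * after draining up to num_entries CQEs, the new consumer index is first
 * published to the tptr area that hardware reads, then the consumer-index
 * doorbell is rung; the wmb() keeps the tptr store ahead of the doorbell.
 */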
int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int ret = 0;

	spin_lock_irqsave(&hr_cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
		if (ret)
			break;
	}

	if (npolled) {
		*hr_cq->tptr_addr = hr_cq->cons_index &
				    ((hr_cq->cq_depth << 1) - 1);

		/* Memory barrier */
		wmb();
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

	spin_unlock_irqrestore(&hr_cq->lock, flags);

	if (ret == 0 || ret == -EAGAIN)
		return npolled;
	else
		return ret;
}
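/*
 * BT command note (explanatory, not from the original source): clearing a
 * HEM entry goes through the two BT_CMD registers - the base address and
 * modifier fields are assembled in bt_cmd_val[], software first waits for
 * the hardware sync bit in ROCEE_BT_CMD_H_REG to drop, then issues the
 * 64-bit write under bt_cmd_lock.
 */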
static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	unsigned long end = 0, flags = 0;
	__le32 bt_cmd_val[2] = {0};
	void __iomem *bt_cmd;
	u64 bt_ba = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		bt_ba = priv->bt_table.qpc_buf.map >> 12;
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
		bt_ba = priv->bt_table.mtpt_buf.map >> 12;
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		bt_ba = priv->bt_table.cqc_buf.map >> 12;
		break;
	case HEM_TYPE_SRQC:
		dev_dbg(dev, "HEM_TYPE_SRQC is not supported.\n");
		return -EINVAL;
	default:
		return 0;
	}
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);

	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
	while (1) {
		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			if (!(time_before(jiffies, end))) {
				dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
						       flags);
				return -EBUSY;
			}
		} else {
			break;
		}
		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
	}

	bt_cmd_val[0] = (__le32)bt_ba;
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);

	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);

	return 0;
}
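/*
 * State-machine note (explanatory, derived from the code below): op[][]
 * maps each legal (cur_state, new_state) pair to a mailbox command; a
 * zero entry marks the transition as unsupported.  The 2RST and 2ERR
 * commands need no QP context, so they are posted without a mailbox
 * buffer.
 */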
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_mtt *mtt,
				 enum hns_roce_qp_state cur_state,
				 enum hns_roce_qp_state new_state,
				 struct hns_roce_qp_context *context,
				 struct hns_roce_qp *hr_qp)
{
	static const u16
	op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
		[HNS_ROCE_QP_STATE_RST] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
		},
		[HNS_ROCE_QP_STATE_INIT] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		/* Note: In v1 engine, HW doesn't support INIT2INIT,
		 * so the RST2INIT cmd is used instead.
		 */
		[HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
		[HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
		},
		[HNS_ROCE_QP_STATE_RTR] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
		},
		[HNS_ROCE_QP_STATE_RTS] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
		[HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
		},
		[HNS_ROCE_QP_STATE_SQD] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
		[HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
		},
		[HNS_ROCE_QP_STATE_ERR] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		}
	};

	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = 0;

	if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
	    new_state >= HNS_ROCE_QP_NUM_STATE ||
	    !op[cur_state][new_state]) {
		dev_err(dev, "[modify_qp] transition from state %d to %d is not supported\n",
			cur_state, new_state);
		return -EINVAL;
	}

	if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
					 HNS_ROCE_CMD_2RST_QP,
					 HNS_ROCE_CMD_TIMEOUT_MSECS);

	if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
					 HNS_ROCE_CMD_2ERR_QP,
					 HNS_ROCE_CMD_TIMEOUT_MSECS);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, context, sizeof(*context));

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				op[cur_state][new_state],
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}
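/*
 * QP1 note (explanatory, not from the original source): the special QP1
 * (GSI) context is not posted through the mailbox; it is written word by
 * word to the per-port QP1C register block, and the state change is then
 * applied by a read-modify-write of ROCEE_QP1C_CFG0_0_REG.
 */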
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state,
			     enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context *context;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle = 0;
	u32 __iomem *addr;
	int rq_pa_start;
	__le32 tmp;
	u32 reg_val;
	u64 *mtts;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	/* Search QP buf's MTTs */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
			       QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);

		context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
		roce_set_field(context->qp1c_bytes_12,
			       QP1C_BYTES_12_SQ_RQ_BT_H_M,
			       QP1C_BYTES_12_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));

		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
		roce_set_bit(context->qp1c_bytes_16,
			     QP1C_BYTES_16_SIGNALING_TYPE_S,
			     le32_to_cpu(hr_qp->sq_signal_bits));
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
			     0);

		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
			       QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
			       QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);

		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l =
			cpu_to_le32((u32)(mtts[rq_pa_start]));

		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
			       (mtts[rq_pa_start]) >> 32);
		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_RQ_CUR_IDX_M,
			       QP1C_BYTES_28_RQ_CUR_IDX_S, 0);

		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_RX_CQ_NUM_M,
			       QP1C_BYTES_32_RX_CQ_NUM_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);
		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_TX_CQ_NUM_M,
			       QP1C_BYTES_32_TX_CQ_NUM_S,
			       to_hr_cq(ibqp->send_cq)->cqn);

		context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);

		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_SQ_CUR_IDX_M,
			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);

		/* Copy context to QP1C register */
		addr = (u32 __iomem *)(hr_dev->reg_base +
				       ROCEE_QP1C_CFG0_0_REG +
				       hr_qp->phy_port * sizeof(*context));

		writel(le32_to_cpu(context->qp1c_bytes_4), addr);
		writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
		writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
		writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
		writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
		writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
		writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
		writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
		writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
		writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
	}

	/* Modify QP1C status */
	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
			    hr_qp->phy_port * sizeof(*context));
	tmp = cpu_to_le32(reg_val);
	roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
	reg_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
		   hr_qp->phy_port * sizeof(*context), reg_val);

	hr_qp->state = new_state;
	if (new_state == IB_QPS_RESET) {
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}

	kfree(context);
	return 0;

out:
	kfree(context);
	return -EINVAL;
}
  2362. static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
  2363. int attr_mask, enum ib_qp_state cur_state,
  2364. enum ib_qp_state new_state)
  2365. {
  2366. struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
  2367. struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
  2368. struct device *dev = &hr_dev->pdev->dev;
  2369. struct hns_roce_qp_context *context;
  2370. const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
  2371. dma_addr_t dma_handle_2 = 0;
  2372. dma_addr_t dma_handle = 0;
  2373. __le32 doorbell[2] = {0};
  2374. int rq_pa_start = 0;
  2375. u64 *mtts_2 = NULL;
  2376. int ret = -EINVAL;
  2377. u64 *mtts = NULL;
  2378. int port;
  2379. u8 port_num;
  2380. u8 *dmac;
  2381. u8 *smac;
  2382. context = kzalloc(sizeof(*context), GFP_KERNEL);
  2383. if (!context)
  2384. return -ENOMEM;
  2385. /* Search qp buf's mtts */
  2386. mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
  2387. hr_qp->mtt.first_seg, &dma_handle);
  2388. if (mtts == NULL) {
  2389. dev_err(dev, "qp buf pa find failed\n");
  2390. goto out;
  2391. }
  2392. /* Search IRRL's mtts */
  2393. mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
  2394. hr_qp->qpn, &dma_handle_2);
  2395. if (mtts_2 == NULL) {
  2396. dev_err(dev, "qp irrl_table find failed\n");
  2397. goto out;
  2398. }
  2399. /*
  2400. * Reset to init
  2401. * Mandatory param:
  2402. * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
  2403. * Optional param: NA
  2404. */
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
			       to_hr_qp_type(hr_qp->ibqp.qp_type));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_PD_M,
			       QP_CONTEXT_QPC_BYTES_4_PD_S,
			       to_hr_pd(ibqp->pd)->pdn);
		hr_qp->access_flags = attr->qp_access_flags;
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
			       to_hr_cq(ibqp->send_cq)->cqn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);

		if (ibqp->srq)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
				       to_hr_srq(ibqp->srq)->srqn);

		roce_set_field(context->qpc_bytes_12,
			       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
			       attr->pkey_index);
		hr_qp->pkey_index = attr->pkey_index;
		roce_set_field(context->qpc_bytes_16,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
			       to_hr_qp_type(hr_qp->ibqp.qp_type));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
		if (attr_mask & IB_QP_ACCESS_FLAGS) {
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
				     !!(attr->qp_access_flags &
				     IB_ACCESS_REMOTE_READ));
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
				     !!(attr->qp_access_flags &
				     IB_ACCESS_REMOTE_WRITE));
		} else {
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
				     !!(hr_qp->access_flags &
				     IB_ACCESS_REMOTE_READ));
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
				     !!(hr_qp->access_flags &
				     IB_ACCESS_REMOTE_WRITE));
		}

		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_PD_M,
			       QP_CONTEXT_QPC_BYTES_4_PD_S,
			       to_hr_pd(ibqp->pd)->pdn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
			       to_hr_cq(ibqp->send_cq)->cqn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);

		if (ibqp->srq)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
				       to_hr_srq(ibqp->srq)->srqn);
		if (attr_mask & IB_QP_PKEY_INDEX)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
				       attr->pkey_index);
		else
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
				       hr_qp->pkey_index);

		roce_set_field(context->qpc_bytes_16,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if ((attr_mask & IB_QP_ALT_PATH) ||
		    (attr_mask & IB_QP_ACCESS_FLAGS) ||
		    (attr_mask & IB_QP_PKEY_INDEX) ||
		    (attr_mask & IB_QP_QKEY)) {
			dev_err(dev, "INIT2RTR attr_mask error\n");
			goto out;
		}
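
		/*
		 * The INIT->RTR transition programs the receive-side path:
		 * WQE/IRRL base addresses, destination MAC and GID, MTU,
		 * hop limit, flow label and the expected RQ PSN all go into
		 * the QP context here.
		 */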
		dmac = (u8 *)attr->ah_attr.roce.dmac;

		context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
		roce_set_field(context->qpc_bytes_24,
			       QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
			       QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));
		roce_set_bit(context->qpc_bytes_24,
			     QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
			     1);
		roce_set_field(context->qpc_bytes_24,
			       QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			       QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
			       attr->min_rnr_timer);
		context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
			       ((u32)(dma_handle_2 >> 32)) &
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
			       QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
			     1);
		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
			     le32_to_cpu(hr_qp->sq_signal_bits));

		port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
			hr_qp->port;
		smac = (u8 *)hr_dev->dev_addr[port];
		/* when dmac equals smac or loop_idc is 1, it should loopback */
		if (ether_addr_equal_unaligned(dmac, smac) ||
		    hr_dev->loop_idc == 0x1)
			roce_set_bit(context->qpc_bytes_32,
				     QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);

		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
			     rdma_ah_get_ah_flags(&attr->ah_attr));
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
			       ilog2((unsigned int)attr->max_dest_rd_atomic));

		if (attr_mask & IB_QP_DEST_QPN)
			roce_set_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
				       attr->dest_qp_num);

		/* Configure GID index */
		port_num = rdma_ah_get_port_num(&attr->ah_attr);
		roce_set_field(context->qpc_bytes_36,
			       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
			       hns_get_gid_index(hr_dev,
						 port_num - 1,
						 grh->sgid_index));

		memcpy(&(context->dmac_l), dmac, 4);

		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
			       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
			       *((u16 *)(&dmac[4])));
		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
			       QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
			       rdma_ah_get_static_rate(&attr->ah_attr));
		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
			       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
			       grh->hop_limit);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
			       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
			       grh->flow_label);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
			       QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
			       grh->traffic_class);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_MTU_M,
			       QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);

		memcpy(context->dgid, grh->dgid.raw,
		       sizeof(grh->dgid.raw));

		dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
				       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));

		roce_set_field(context->qpc_bytes_68,
			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
			       hr_qp->rq.head);
		roce_set_field(context->qpc_bytes_68,
			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);

		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l =
				cpu_to_le32((u32)(mtts[rq_pa_start]));
		roce_set_field(context->qpc_bytes_76,
			       QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
			       mtts[rq_pa_start] >> 32);
		roce_set_field(context->qpc_bytes_76,
			       QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
			       QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);

		context->rx_rnr_time = 0;

		roce_set_field(context->qpc_bytes_84,
			       QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
			       QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
			       attr->rq_psn - 1);
		roce_set_field(context->qpc_bytes_84,
			       QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);

		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
			       QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
			       attr->rq_psn);
		roce_set_bit(context->qpc_bytes_88,
			     QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
		roce_set_bit(context->qpc_bytes_88,
			     QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
			       0);
		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
			       0);

		context->dma_length = 0;
		context->r_key = 0;
		context->va_l = 0;
		context->va_h = 0;

		roce_set_field(context->qpc_bytes_108,
			       QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
			       QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
		roce_set_bit(context->qpc_bytes_108,
			     QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
		roce_set_bit(context->qpc_bytes_108,
			     QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);

		roce_set_field(context->qpc_bytes_112,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
		roce_set_field(context->qpc_bytes_112,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);

		/* For chip resp ack */
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
			       hr_qp->phy_port);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_SL_M,
			       QP_CONTEXT_QPC_BYTES_156_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
	} else if (cur_state == IB_QPS_RTR &&
		   new_state == IB_QPS_RTS) {
		/* If optional params are present, return an error */
		if ((attr_mask & IB_QP_ALT_PATH) ||
		    (attr_mask & IB_QP_ACCESS_FLAGS) ||
		    (attr_mask & IB_QP_QKEY) ||
		    (attr_mask & IB_QP_PATH_MIG_STATE) ||
		    (attr_mask & IB_QP_CUR_STATE) ||
		    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
			dev_err(dev, "RTR2RTS attr_mask error\n");
			goto out;
		}
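
		/*
		 * The RTR->RTS transition programs the send side: the
		 * current SQ WQE address, the SQ PSN and its retry copies,
		 * ack timeout, retry/RNR-retry counts and initiator depth.
		 */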
		context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
		roce_set_field(context->qpc_bytes_120,
			       QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);

		roce_set_field(context->qpc_bytes_124,
			       QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
			       QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
		roce_set_field(context->qpc_bytes_124,
			       QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
			       QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);

		roce_set_field(context->qpc_bytes_128,
			       QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
			       QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
			       attr->sq_psn);
		roce_set_bit(context->qpc_bytes_128,
			     QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
		roce_set_field(context->qpc_bytes_128,
			       QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
			       0);
		roce_set_bit(context->qpc_bytes_128,
			     QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);

		roce_set_field(context->qpc_bytes_132,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
		roce_set_field(context->qpc_bytes_132,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);

		roce_set_field(context->qpc_bytes_136,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
			       attr->sq_psn);
		roce_set_field(context->qpc_bytes_136,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
			       attr->sq_psn);

		roce_set_field(context->qpc_bytes_140,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
			       (attr->sq_psn >> SQ_PSN_SHIFT));
		roce_set_field(context->qpc_bytes_140,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
		roce_set_bit(context->qpc_bytes_140,
			     QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);

		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
			       attr->retry_cnt);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
			       attr->rnr_retry);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_LSN_M,
			       QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);

		context->rnr_retry = 0;

		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
			       attr->retry_cnt);
		if (attr->timeout < 0x12) {
			dev_info(dev, "ack timeout value(0x%x) must be at least 0x12, using 0x12.\n",
				 attr->timeout);
			roce_set_field(context->qpc_bytes_156,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
				       0x12);
		} else {
			roce_set_field(context->qpc_bytes_156,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
				       attr->timeout);
		}
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
			       attr->rnr_retry);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
			       hr_qp->phy_port);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_SL_M,
			       QP_CONTEXT_QPC_BYTES_156_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
			       QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
			       ilog2((unsigned int)attr->max_rd_atomic));
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
			       QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
		context->pkt_use_len = 0;

		roce_set_field(context->qpc_bytes_164,
			       QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
			       QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
		roce_set_field(context->qpc_bytes_164,
			       QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);

		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
			       QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
			       attr->sq_psn);
		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
			       QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
		context->sge_use_len = 0;

		roce_set_field(context->qpc_bytes_176,
			       QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
		roce_set_field(context->qpc_bytes_176,
			       QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
			       0);
		roce_set_field(context->qpc_bytes_180,
			       QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
		roce_set_field(context->qpc_bytes_180,
			       QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);

		context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
		roce_set_field(context->qpc_bytes_188,
			       QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_bit(context->qpc_bytes_188,
			     QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
		roce_set_field(context->qpc_bytes_188,
			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
			       0);
	} else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
		dev_err(dev, "unsupported qp state migration\n");
		goto out;
	}
	/* Every state migration must update the QP state field */
	roce_set_field(context->qpc_bytes_144,
		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);

	/* SW passes context to HW */
	ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
				    to_hns_roce_state(cur_state),
				    to_hns_roce_state(new_state), context,
				    hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed\n");
		goto out;
	}

	/*
	 * The driver issues rst2init in place of init2init, so hardware
	 * must be told to refresh the RQ head via a doorbell write again.
	 */
	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		/* Memory barrier */
		wmb();

		roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
			       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
			       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
			       RQ_DOORBELL_U32_8_CMD_S, 1);
		roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);

		if (ibqp->uobject) {
			hr_qp->rq.db_reg_l = hr_dev->reg_base +
				hr_dev->odb_offset +
				DB_REG_OFFSET * hr_dev->priv_uar.index;
		}

		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
	}
	hr_qp->state = new_state;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}
out:
	kfree(context);
	return ret;
}
static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
					 new_state);
	else
		return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
					new_state);
}
static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
{
	switch (state) {
	case HNS_ROCE_QP_STATE_RST:
		return IB_QPS_RESET;
	case HNS_ROCE_QP_STATE_INIT:
		return IB_QPS_INIT;
	case HNS_ROCE_QP_STATE_RTR:
		return IB_QPS_RTR;
	case HNS_ROCE_QP_STATE_RTS:
		return IB_QPS_RTS;
	case HNS_ROCE_QP_STATE_SQD:
		return IB_QPS_SQD;
	case HNS_ROCE_QP_STATE_ERR:
		return IB_QPS_ERR;
	default:
		return IB_QPS_ERR;
	}
}
static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp,
				 struct hns_roce_qp_context *hr_context)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
				HNS_ROCE_CMD_QUERY_QP,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (!ret)
		memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
	else
		dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			     int qp_attr_mask,
			     struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context context;
	u32 addr;

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	addr = ROCEE_QP1C_CFG0_0_REG +
		hr_qp->port * sizeof(struct hns_roce_sqp_context);
	context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
	context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
	context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
	context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
	context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
	context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
	context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
	context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
	context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
	context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));

	hr_qp->state = roce_get_field(context.qp1c_bytes_4,
				      QP1C_BYTES_4_QP_STATE_M,
				      QP1C_BYTES_4_QP_STATE_S);
	qp_attr->qp_state = hr_qp->state;
	qp_attr->path_mtu = IB_MTU_256;
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->qkey = QKEY_VAL;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	qp_attr->rq_psn = 0;
	qp_attr->sq_psn = 0;
	qp_attr->dest_qp_num = 1;
	qp_attr->qp_access_flags = 6;

	qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
					     QP1C_BYTES_20_PKEY_IDX_M,
					     QP1C_BYTES_20_PKEY_IDX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 0;
	qp_attr->max_dest_rd_atomic = 0;
	qp_attr->min_rnr_timer = 0;
	qp_attr->timeout = 0;
	qp_attr->retry_cnt = 0;
	qp_attr->rnr_retry = 0;
	qp_attr->alt_timeout = 0;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	qp_attr->cap.max_inline_data = 0;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->create_flags = 0;

	mutex_unlock(&hr_qp->mutex);

	return 0;
}
static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			    int qp_attr_mask,
			    struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_context *context;
	int tmp_qp_state = 0;
	int ret = 0;
	int state;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	state = roce_get_field(context->qpc_bytes_144,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
	tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "to_ib_qp_state error\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
					QP_CONTEXT_QPC_BYTES_48_MTU_M,
					QP_CONTEXT_QPC_BYTES_48_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
					      QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
					      QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
	qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
					QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
					QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
	qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->qpc_bytes_156,
					      QP_CONTEXT_QPC_BYTES_156_SL_M,
					      QP_CONTEXT_QPC_BYTES_156_SL_S));
		rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
		grh->flow_label =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
		grh->sgid_index =
			roce_get_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
		grh->hop_limit =
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
		grh->traffic_class =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_S);

		memcpy(grh->dgid.raw, context->dgid,
		       sizeof(grh->dgid.raw));
	}

	qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
			 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
			 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
			 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
			 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
	qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
			 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
	qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
			    QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
			    QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
	qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
			     QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			     QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
	qp_attr->rnr_retry = (u8)context->rnr_retry;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);

	return ret;
}
static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	return hr_qp->doorbell_qpn <= 1 ?
		hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
		hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
}
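
/*
 * Doorbell-drain helpers: before a QP can be destroyed, the send doorbells
 * already issued to hardware must have been consumed. The helpers below
 * compare the hardware send/retry pointers against a snapshot taken when
 * the QP was moved to ERR, and decide whether hardware has made progress.
 */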
static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
				      u32 *old_send, u32 *old_retry,
				      u32 *tsp_st, u32 *success_flags)
{
	__le32 *old_send_tmp, *old_retry_tmp;
	u32 sdb_retry_cnt;
	u32 sdb_send_ptr;
	u32 cur_cnt, old_cnt;
	__le32 tmp, tmp1;
	u32 send_ptr;

	sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
	sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
	tmp = cpu_to_le32(sdb_send_ptr);
	tmp1 = cpu_to_le32(sdb_retry_cnt);
	cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
		  roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);

	old_send_tmp = (__le32 *)old_send;
	old_retry_tmp = (__le32 *)old_retry;
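	/*
	 * If hardware is not clearing its counters, progress is judged by
	 * the combined delta of send pointer plus retry count; if it is,
	 * only the send pointer delta is trusted, and the retry count is
	 * folded back into the saved send pointer for the next poll.
	 */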
	if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
		old_cnt = roce_get_field(*old_send_tmp,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
			  roce_get_field(*old_retry_tmp,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
			*success_flags = 1;
	} else {
		old_cnt = roce_get_field(*old_send_tmp,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
			*success_flags = 1;
		} else {
			send_ptr = roce_get_field(*old_send_tmp,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
				   roce_get_field(tmp1,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
			roce_set_field(*old_send_tmp,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
				       send_ptr);
		}
	}
}
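
/*
 * The doorbell drain is tracked in two stages: stage 1 waits until the
 * hardware send pointer catches up with the doorbell issue pointer, and
 * stage 2 waits until the invalidate counter advances far enough past the
 * snapshot taken at the end of stage 1. Timeouts fall through with 0 so
 * that the caller can requeue the check from a workqueue.
 */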
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
				      struct hns_roce_qp *hr_qp,
				      u32 sdb_issue_ptr,
				      u32 *sdb_inv_cnt,
				      u32 *wait_stage)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_send_ptr, old_send;
	__le32 sdb_issue_ptr_tmp;
	__le32 sdb_send_ptr_tmp;
	u32 success_flags = 0;
	unsigned long end;
	u32 old_retry;
	u32 inv_cnt;
	u32 tsp_st;
	__le32 tmp;

	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
			hr_qp->qpn, *wait_stage);
		return -EINVAL;
	}

	/* Calculate the total timeout for the entire verification process */
	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
		/* Query db process status until hw has processed completely */
		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
					    ROCEE_SDB_PTR_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
					hr_qp->qpn, sdb_issue_ptr,
					sdb_send_ptr);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			sdb_send_ptr = roce_read(hr_dev,
						 ROCEE_SDB_SEND_PTR_REG);
		}

		sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr);
		sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr);
		if (roce_get_field(sdb_issue_ptr_tmp,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
		    roce_get_field(sdb_send_ptr_tmp,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);

			do {
				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
				tmp = cpu_to_le32(tsp_st);
				if (roce_get_bit(tmp,
					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
					return 0;
				}

				if (!time_before(jiffies, end)) {
					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
						     "issue 0x%x send 0x%x.\n",
						hr_qp->qpn,
						le32_to_cpu(sdb_issue_ptr_tmp),
						le32_to_cpu(sdb_send_ptr_tmp));
					return 0;
				}

				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);

				hns_roce_check_sdb_status(hr_dev, &old_send,
							  &old_retry, &tsp_st,
							  &success_flags);
			} while (!success_flags);
		}

		*wait_stage = HNS_ROCE_V1_DB_STAGE2;

		/* Get list pointer */
		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
			hr_qp->qpn, *sdb_inv_cnt);
	}

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
		/* Query db's list status until hw has finished with it */
		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		while (roce_hw_index_cmp_lt(inv_cnt,
					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
					    ROCEE_SDB_CNT_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
					hr_qp->qpn, inv_cnt);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		}

		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
	}

	return 0;
}
static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *hr_qp,
				struct hns_roce_qp_work *qp_work_entry,
				int *is_timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_issue_ptr;
	int ret;

	if (hr_qp->state != IB_QPS_RESET) {
		/* Set qp to ERR, then wait for hw to complete all dbs */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_ERR);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
				hr_qp->qpn);
			return ret;
		}

		/* Record issued doorbell */
		sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
		qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
		qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;

		/* Query db process status until hw has processed completely */
		ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
						 &qp_work_entry->sdb_inv_cnt,
						 &qp_work_entry->db_wait_stage);
		if (ret) {
			dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
				hr_qp->qpn);
			return ret;
		}

		if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
			qp_work_entry->sche_cnt = 0;
			*is_timeout = 1;
			return 0;
		}

		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_RESET);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
				hr_qp->qpn);
			return ret;
		}
	}

	return 0;
}
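
/*
 * Deferred destroy path: when the doorbell drain times out in
 * check_qp_reset_state(), the final teardown is handed to this work
 * function, which re-checks the drain status and requeues itself until
 * the doorbells are gone, then resets and frees the QP.
 */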
static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_qp_work *qp_work_entry;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long qpn;
	int ret;

	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
	dev = &hr_dev->pdev->dev;
	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	hr_qp = qp_work_entry->qp;
	qpn = hr_qp->qpn;

	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);

	qp_work_entry->sche_cnt++;

	/* Query db process status until hw has processed completely */
	ret = check_qp_db_process_status(hr_dev, hr_qp,
					 qp_work_entry->sdb_issue_ptr,
					 &qp_work_entry->sdb_inv_cnt,
					 &qp_work_entry->db_wait_stage);
	if (ret) {
		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
			qpn);
		return;
	}

	if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
	    priv->des_qp.requeue_flag) {
		queue_work(priv->des_qp.qp_wq, work);
		return;
	}

	/* Modify qp to reset before destroying qp */
	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
				    IB_QPS_RESET);
	if (ret) {
		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
		return;
	}

	hns_roce_qp_remove(hr_dev, hr_qp);
	hns_roce_qp_free(hr_dev, hr_qp);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
		/* RC QP, release QPN */
		hns_roce_release_range_qp(hr_dev, qpn, 1);
		kfree(hr_qp);
	} else {
		kfree(hr_to_hr_sqp(hr_qp));
	}

	kfree(qp_work_entry);

	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
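
/*
 * Synchronous destroy entry point: drain doorbells, clean the CQs of any
 * stale work completions, then either free the QP immediately or, if the
 * drain timed out, defer the teardown to the destroy-QP workqueue.
 */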
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_work qp_work_entry;
	struct hns_roce_qp_work *qp_work;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_cq *send_cq, *recv_cq;
	int is_user = !!ibqp->pd->uobject;
	int is_timeout = 0;
	int ret;

	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
	if (ret) {
		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
		return ret;
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	hns_roce_lock_cqs(send_cq, recv_cq);
	if (!is_user) {
		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
	}
	hns_roce_unlock_cqs(send_cq, recv_cq);

	if (!is_timeout) {
		hns_roce_qp_remove(hr_dev, hr_qp);
		hns_roce_qp_free(hr_dev, hr_qp);

		/* RC QP, release QPN */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user) {
		ib_umem_release(hr_qp->umem);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	}

	if (!is_timeout) {
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			kfree(hr_qp);
		else
			kfree(hr_to_hr_sqp(hr_qp));
	} else {
		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
		if (!qp_work)
			return -ENOMEM;

		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
		qp_work->ib_dev = &hr_dev->ib_dev;
		qp_work->qp = hr_qp;
		qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
		qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
		qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
		qp_work->sche_cnt = qp_work_entry.sche_cnt;

		priv = (struct hns_roce_v1_priv *)hr_dev->priv;
		queue_work(priv->des_qp.qp_wq, &qp_work->work);
		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
	}

	return 0;
}
static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqe_cnt_ori;
	u32 cqe_cnt_cur;
	u32 cq_buf_size;
	int wait_time = 0;
	int ret = 0;

	hns_roce_free_cq(hr_dev, hr_cq);

	/*
	 * Before freeing the cq buffer, ensure that all outstanding CQEs
	 * have been written by checking the CQE counter.
	 */
	cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
	while (1) {
		if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
		    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
			break;

		cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
		if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
			break;

		msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
				 hr_cq->cqn);
			ret = -ETIMEDOUT;
			break;
		}
		wait_time++;
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ibcq->uobject) {
		ib_umem_release(hr_cq->umem);
	} else {
		/* Free the buff of stored cq */
		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
	}

	kfree(hr_cq);

	return ret;
}
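
/*
 * The EQ consumer-index doorbell packs the masked consumer index in the
 * low bits and the rearm/request-notification flag one bit above the
 * index field (at bit position eq->log_entries).
 */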
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
		       (req_not << eq->log_entries), eq->doorbell);
}
static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					    struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}
}
static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						   struct hns_roce_aeqe *aeqe,
						   int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}
}
static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	int phy_port;
	int qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	phy_port = roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
	if (qpn <= 1)
		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
			      "QP %d, phy_port %d.\n", qpn, phy_port);
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}
static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqn;

	cqn = roce_get_field(aeqe->event.cq_event.cq,
			     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
			     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}
static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
					   struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}
static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			     HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}
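
/*
 * Ownership scheme: hardware toggles the owner bit in each AEQE every
 * time it wraps the queue. An entry is valid for software when its owner
 * bit differs from the wrap state encoded in the consumer index
 * (cons_index & eq->entries), expressed below as an XOR.
 */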
static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw_v1(eq))) {
		/* Make sure we read the AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
				 roce_get_field(aeqe->event.ce_event.ceqe,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v1(eq, 0);

	return aeqes_found;
}
static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			     HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
			(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
			off % HNS_ROCE_BA_SIZE);
}
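
/* Same owner-bit validity test as next_aeqe_sw_v1(), applied to CEQEs */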
static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->comp,
		   HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
	       (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw_v1(eq))) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v1(eq, 0);

	return ceqes_found;
}
static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work = 0;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		int_work = hns_roce_v1_ceq_int(hr_dev, eq);
	else
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		int_work = hns_roce_v1_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = &hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	__le32 tmp;
	int i;

	/*
	 * Abnormal interrupt sources (AEQ overflow, ECC multi-bit error,
	 * CEQ almost-overflow) must be cleared explicitly: mask the irq,
	 * clear the status bit, then cancel the mask.
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
	tmp = cpu_to_le32(aeshift_val);

	/* AEQE overflow */
	if (roce_get_bit(tmp,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		tmp = cpu_to_le32(caepaemask_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		caepaemask_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state (INT_WC: write 1 to clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		tmp = cpu_to_le32(caepaest_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		caepaest_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		tmp = cpu_to_le32(caepaemask_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		caepaemask_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);
		tmp = cpu_to_le32(ceshift_val);

		if (roce_get_bit(tmp,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cemask_val);
			roce_set_bit(tmp,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			cemask_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state (INT_WC: write 1 to clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cealmovf_val);
			roce_set_bit(tmp,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			cealmovf_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cemask_val);
			roce_set_bit(tmp,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			cemask_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return IRQ_RETVAL(int_work);
}
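
/*
 * Each overflow branch above follows the same three-step sequence: mask
 * the source, acknowledge the write-1-to-clear status bit, then unmask.
 * Note that only the CEQ almost-overflow path bumps int_work; an AEQ
 * overflow alone is still acknowledged but reported as IRQ_NONE.
 */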

static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
	u32 aemask_val;
	int masken = 0;
	__le32 tmp;
	int i;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	tmp = cpu_to_le32(aemask_val);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	aemask_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}
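
/*
 * masken is fixed at 0: the same value is written to the AEQ
 * almost-overflow mask, the AEQ irq mask and each per-CEQ irq mask, so
 * one call drives every RoCE interrupt mask bit to the same state.
 */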

static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
	int i;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}
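
/*
 * npages mirrors the num_bas computation in hns_roce_v1_create_eq()
 * below: the EQ buffer is split into HNS_ROCE_BA_SIZE chunks, each backed
 * by its own coherent DMA allocation, so free and alloc must round up the
 * page count identically.
 */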

static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
				  int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	__le32 tmp;
	u32 val;

	val = readl(eqc);
	tmp = cpu_to_le32(val);

	if (enable_flag)
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);

	val = le32_to_cpu(tmp);
	writel(val, eqc);
}
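
/*
 * eqc may point at either an AEQC or a CEQC register block, yet only the
 * AEQC field macros are used, which implies the STATE field occupies the
 * same bit positions in both contexts on this hardware revision.
 */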

static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	__le32 tmp2 = 0;
	__le32 tmp1 = 0;
	__le32 tmp = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "eq buf size %d exceeds ba size %d, %d bas needed\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	eqshift_val = le32_to_cpu(tmp);
	writel(eqshift_val, eqc);

	/* Configure eq extended address bits 12~44 */
	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

	/*
	 * Configure eq extended address bits 45~49.
	 * 44 = 32 + 12: the address is shifted right by 12 because the
	 * hardware addresses 4K pages, and by a further 32 to extract the
	 * high 32-bit half of the value written to hardware.
	 */
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	eqcuridx_val = le32_to_cpu(tmp1);
	writel(eqcuridx_val, eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	eqconsindx_val = le32_to_cpu(tmp2);
	writel(eqconsindx_val, eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}
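
/*
 * Address split, with an illustrative (hypothetical) value: for
 * buf_list[0].map = 0x100000001000, (u32)(map >> 12) = 0x1 lands in the
 * second EQC word (address bits 12..43), and map >> 44 = 0x1 goes into
 * the BT_H field of the third word (the remaining high bits).
 */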

static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq;
	int irq_num;
	int eq_num;
	int ret;
	int i, j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						CEQ_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       CEQ_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
		}
	}

	/* Disable irq */
	hns_roce_v1_int_mask_enable(hr_dev);

	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < irq_num; j++) {
		if (j < eq_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_eq, 0,
					  hr_dev->irq_names[j],
					  &eq_table->eq[j]);
		else
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_abn, 0,
					  hr_dev->irq_names[j], hr_dev);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j -= 1; j >= 0; j--)
		free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}
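
/*
 * Bring-up order above: every EQ context is programmed while still in the
 * HNS_ROCE_EQ_STAT_INVALID state set by hns_roce_v1_create_eq(), and the
 * queues are only flipped to valid after all IRQ handlers are installed,
 * so a handler that fires early should see only invalid, empty queues.
 * The error labels unwind the same steps in reverse.
 */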

static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(hr_dev->irq[i], &eq_table->eq[i]);

		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
	}
	for (i = eq_num; i < irq_num; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}

static const struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.post_mbox = hns_roce_v1_post_mbox,
	.chk_mbox = hns_roce_v1_chk_mbox,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.modify_cq = hns_roce_v1_modify_cq,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.init_eq = hns_roce_v1_init_eq_table,
	.cleanup_eq = hns_roce_v1_cleanup_eq_table,
};

static const struct of_device_id hns_roce_of_match[] = {
	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
	{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);

static const struct acpi_device_id hns_roce_acpi_match[] = {
	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
	{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);

static int hns_roce_node_match(struct device *dev, void *fwnode)
{
	return dev->fwnode == fwnode;
}

static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
	struct device *dev;

	/* get the 'device' corresponding to the matching 'fwnode' */
	dev = bus_find_device(&platform_bus_type, NULL,
			      fwnode, hns_roce_node_match);
	/* get the platform device */
	return dev ? to_platform_device(dev) : NULL;
}
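
/*
 * bus_find_device() walks the platform bus comparing each device's fwnode
 * against the one referenced by the "eth-handle" property, and
 * to_platform_device() is a container_of() back to the owning platform
 * device. This is how hns_roce_get_cfg() below resolves its ethernet
 * port references under ACPI.
 */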

static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct platform_device *pdev = NULL;
	struct net_device *netdev = NULL;
	struct device_node *net_node;
	struct resource *res;
	int port_cnt = 0;
	u8 phy_port;
	int ret;
	int i;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	hr_dev->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* read the node_guid of IB device from the DT or ACPI */
	ret = device_property_read_u8_array(dev, "node-guid",
					    (u8 *)&hr_dev->ib_dev.node_guid,
					    GUID_LEN);
	if (ret) {
		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
		return ret;
	}

	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct fwnode_reference_args args;

			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			pdev = hns_roce_find_pdev(args.fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_V1_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0) {
			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
			return -EINVAL;
		}
	}

	return 0;
}
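
/*
 * Everything consumed above ("node-guid", "eth-handle",
 * "interrupt-names", the MMIO resource, the IRQ numbers) is read through
 * generic device-property and platform helpers, which is what lets a
 * single probe path serve both the DT and the ACPI description of the
 * hardware.
 */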

/**
 * hns_roce_probe - RoCE driver entrance
 * @pdev: pointer to platform device
 *
 * Return: 0 on success, or a negative errno on failure
 */
static int hns_roce_probe(struct platform_device *pdev)
{
	int ret;
	struct hns_roce_dev *hr_dev;
	struct device *dev = &pdev->dev;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pdev = pdev;
	hr_dev->dev = dev;
	platform_set_drvdata(pdev, hr_dev);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
		dev_err(dev, "No usable DMA addressing mode\n");
		ret = -EIO;
		goto error_failed_get_cfg;
	}

	ret = hns_roce_get_cfg(hr_dev);
	if (ret) {
		dev_err(dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(dev, "RoCE engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

/**
 * hns_roce_remove - remove RoCE device
 * @pdev: pointer to platform device
 */
static int hns_roce_remove(struct platform_device *pdev)
{
	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);

	return 0;
}

static struct platform_driver hns_roce_driver = {
	.probe = hns_roce_probe,
	.remove = hns_roce_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hns_roce_of_match,
		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
	},
};

module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");