qed_roce.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * - Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

void qed_async_roce_event(struct qed_hwfn *p_hwfn,
                          struct event_ring_entry *p_eqe)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

        p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
                                             p_eqe->opcode, &p_eqe->data);
}

static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
                               struct qed_bmap *bmap, u32 max_count)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

        bmap->max_count = max_count;

        bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
                               GFP_KERNEL);
        if (!bmap->bitmap) {
                DP_NOTICE(p_hwfn,
                          "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
                return -ENOMEM;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
                   bmap->bitmap);
        return 0;
}

static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
                                  struct qed_bmap *bmap, u32 *id_num)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);

        *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);

        if (*id_num >= bmap->max_count) {
                DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
                          bmap->max_count);
                return -EINVAL;
        }

        __set_bit(*id_num, bmap->bitmap);

        return 0;
}

static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
                                struct qed_bmap *bmap, u32 id_num)
{
        bool b_acquired;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
        if (id_num >= bmap->max_count)
                return;

        b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
        if (!b_acquired) {
                DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
                return;
        }
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
        /* First sb id for RoCE is after all the l2 sb */
        return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
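
/* Allocate the per-PF RDMA bookkeeping: the qed_rdma_info struct, the
 * device and port parameter structs, and the bitmaps used to hand out PDs,
 * DPIs, CQs, CQ toggle bits, TIDs and QP CIDs. Sizing follows the CID/TID
 * counts reported by the context manager (two CIDs per QP, one task per MR).
 */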
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_info *p_rdma_info;
        u32 num_cons, num_tasks;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

        /* Allocate a struct with current pf rdma info */
        p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
        if (!p_rdma_info) {
                DP_NOTICE(p_hwfn,
                          "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
                          rc);
                return rc;
        }
        p_hwfn->p_rdma_info = p_rdma_info;
        p_rdma_info->proto = PROTOCOLID_ROCE;

        num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
                                               NULL);

        p_rdma_info->num_qps = num_cons / 2;

        num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

        /* Each MR uses a single task */
        p_rdma_info->num_mrs = num_tasks;

        /* Queue zone lines are shared between RoCE and L2 in such a way that
         * they can be used by each without obstructing the other.
         */
        p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);

        /* Allocate a struct with device params and fill it */
        p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
        if (!p_rdma_info->dev) {
                DP_NOTICE(p_hwfn,
                          "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
                          rc);
                goto free_rdma_info;
        }

        /* Allocate a struct with port params and fill it */
        p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
        if (!p_rdma_info->port) {
                DP_NOTICE(p_hwfn,
                          "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
                          rc);
                goto free_rdma_dev;
        }

        /* Allocate bit map for pd's */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate pd_map, rc = %d\n",
                           rc);
                goto free_rdma_port;
        }

        /* Allocate DPI bitmap */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
                                 p_hwfn->dpi_count);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate DPI bitmap, rc = %d\n", rc);
                goto free_pd_map;
        }

        /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
         * twice the number of QPs.
         */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
                                 p_rdma_info->num_qps * 2);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate cq bitmap, rc = %d\n", rc);
                goto free_dpi_map;
        }

        /* Allocate bitmap for toggle bit for cq icids
         * We toggle the bit every time we create or resize cq for a given icid.
         * The maximum number of CQs is bounded to twice the number of QPs.
         */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
                                 p_rdma_info->num_qps * 2);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }

        /* Allocate bitmap for itids */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
                                 p_rdma_info->num_mrs);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate itids bitmaps, rc = %d\n", rc);
                goto free_toggle_map;
        }

        /* Allocate bitmap for cids used for qps. */
        rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "Failed to allocate cid bitmap, rc = %d\n", rc);
                goto free_tid_map;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
        return 0;

free_tid_map:
        kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
        kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
        kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
        kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
        kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
        kfree(p_rdma_info->port);
free_rdma_dev:
        kfree(p_rdma_info->dev);
free_rdma_info:
        kfree(p_rdma_info);

        return rc;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

        kfree(p_rdma_info->cid_map.bitmap);
        kfree(p_rdma_info->tid_map.bitmap);
        kfree(p_rdma_info->toggle_bits.bitmap);
        kfree(p_rdma_info->cq_map.bitmap);
        kfree(p_rdma_info->dpi_map.bitmap);
        kfree(p_rdma_info->pd_map.bitmap);
        kfree(p_rdma_info->port);
        kfree(p_rdma_info->dev);

        kfree(p_rdma_info);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

        qed_rdma_resc_free(p_hwfn);
}
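
/* Derive the 64-bit node GUID from the port MAC address in the usual
 * EUI-64 fashion: flip the locally-administered bit of the first octet and
 * insert 0xff, 0xfe between the two MAC halves.
 */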
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
        guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
        guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
        guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
        guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
        guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
                                 struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_events *events;

        events = &p_hwfn->p_rdma_info->events;

        events->unaffiliated_event = params->events->unaffiliated_event;
        events->affiliated_event = params->events->affiliated_event;
        events->context = params->events->context;
}
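
/* Fill the qed_rdma_device capability struct that is later handed to the
 * upper-layer driver via qed_rdma_query_device(): vendor/firmware
 * identification, SGE/inline/WQE limits, QP/CQ/MR/PD counts and the
 * device capability flags.
 */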
static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
                                  struct qed_rdma_start_in_params *params)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 pci_status_control;
        u32 num_qps;

        /* Vendor specific information */
        dev->vendor_id = cdev->vendor_id;
        dev->vendor_part_id = cdev->device_id;
        dev->hw_ver = 0;
        dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
                      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

        qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
        dev->node_guid = dev->sys_image_guid;

        dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
                             RDMA_MAX_SGE_PER_RQ_WQE);

        if (cdev->rdma_max_sge)
                dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

        dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

        dev->max_inline = (cdev->rdma_max_inline) ?
                          min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
                          dev->max_inline;

        dev->max_wqe = QED_RDMA_MAX_WQE;
        dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

        /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
         * it is up-aligned to 16 and then to ILT page size within qed cxt.
         * This is OK in terms of ILT but we don't want to configure the FW
         * above its abilities
         */
        num_qps = ROCE_MAX_QPS;
        num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
        dev->max_qp = num_qps;

        /* CQs uses the same icids that QPs use hence they are limited by the
         * number of icids. There are two icids per QP.
         */
        dev->max_cq = num_qps * 2;

        /* The number of mrs is smaller by 1 since the first is reserved */
        dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
        dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

        /* The maximum CQE capacity per CQ supported.
         * max number of cqes will be in two layer pbl,
         * 8 is the pointer size in bytes
         * 32 is the size of cq element in bytes
         */
        if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
                dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
        else
                dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

        dev->max_mw = 0;
        dev->max_fmr = QED_RDMA_MAX_FMR;
        dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
        dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
        dev->max_pkey = QED_RDMA_MAX_P_KEY;

        dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
                                          (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
        dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
                                         RDMA_REQ_RD_ATOMIC_ELM_SIZE;
        dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
                                           p_hwfn->p_rdma_info->num_qps;
        dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
        dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
        dev->max_pd = RDMA_MAX_PDS;
        dev->max_ah = p_hwfn->p_rdma_info->num_qps;
        dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

        /* Set capabilities */
        dev->dev_caps = 0;
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
        SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

        /* Check atomic operations support in PCI configuration space. */
        pci_read_config_dword(cdev->pdev,
                              cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
                              &pci_status_control);

        if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
                SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        port->port_state = p_hwfn->mcp_info->link_output.link_up ?
                           QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

        port->max_msg_size = min_t(u64,
                                   (dev->max_mr_mw_fmr_size *
                                    p_hwfn->cdev->rdma_max_sge),
                                   BIT(31));

        port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 ll2_ethertype_en;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
        p_hwfn->b_rdma_enabled_in_prs = false;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

        /* We delay writing to this reg until first cid is allocated. See
         * qed_cxt_dynamic_ilt_alloc function for more details
         */
        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en | 0x01));

        if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
                DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
        return 0;
}
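
/* Build and post the RDMA function-init ramrod: CNQ start offset, CQ ring
 * mode and, for every requested CNQ, its status block number, PBL base
 * address and queue-zone number.
 */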
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
                             struct qed_rdma_start_in_params *params,
                             struct qed_ptt *p_ptt)
{
        struct rdma_init_func_ramrod_data *p_ramrod;
        struct qed_rdma_cnq_params *p_cnq_pbl_list;
        struct rdma_init_func_hdr *p_params_header;
        struct rdma_cnq_params *p_cnq_params;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u32 cnq_id, sb_id;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

        /* Save the number of cnqs for the function close ramrod */
        p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

        p_params_header = &p_ramrod->params_header;
        p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
                                                            QED_RDMA_CNQ_RAM);
        p_params_header->num_cnqs = params->desired_cnq;

        if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
                p_params_header->cq_ring_mode = 1;
        else
                p_params_header->cq_ring_mode = 0;

        for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
                sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
                p_cnq_params = &p_ramrod->cnq_params[cnq_id];
                p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
                p_cnq_params->sb_num =
                        cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);

                p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
                p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

                DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
                               p_cnq_pbl_list->pbl_ptr);

                /* we assume here that cnq_id and qz_offset are the same */
                p_cnq_params->queue_zone_num =
                        cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
                                    cnq_id);
        }

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->tid_map, itid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc)
                goto out;

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
        return rc;
}
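
/* Reserve DPI 0 for the kernel and TID 0 as the reserved lkey. No ramrod
 * is posted for the reserved MR; the allocation only claims the bitmap
 * entry (and its ILT backing) and is sanity-checked against
 * RDMA_RESERVED_LKEY.
 */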
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        /* The first DPI is reserved for the Kernel */
        __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);

        /* Tid 0 will be used as the key for "reserved MR".
         * The driver should allocate memory for it so it can be loaded but no
         * ramrod should be passed on it.
         */
        qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
        if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
                DP_NOTICE(p_hwfn,
                          "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
                return -EINVAL;
        }

        return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_rdma_start_in_params *params)
{
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

        spin_lock_init(&p_hwfn->p_rdma_info->lock);

        qed_rdma_init_devinfo(p_hwfn, params);
        qed_rdma_init_port(p_hwfn);
        qed_rdma_init_events(p_hwfn, params);

        rc = qed_rdma_reserve_lkey(p_hwfn);
        if (rc)
                return rc;

        rc = qed_rdma_init_hw(p_hwfn, p_ptt);
        if (rc)
                return rc;

        return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
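
/* Tear RoCE down on this function: disable RoCE searching in the parser,
 * clear the light-L2 RoCE ethertype bit, post the function-close ramrod
 * and free everything allocated by qed_rdma_alloc().
 */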
static int qed_rdma_stop(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_close_func_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        struct qed_ptt *p_ptt;
        u32 ll2_ethertype_en;
        int rc = -EBUSY;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
                return rc;
        }

        /* Disable RoCE search */
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
        p_hwfn->b_rdma_enabled_in_prs = false;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en & 0xFFFE));

        qed_ptt_release(p_hwfn, p_ptt);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Stop RoCE */
        rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto out;

        p_ramrod = &p_ent->ramrod.rdma_close_func;

        p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
        p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
        qed_rdma_free(p_hwfn);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
        return rc;
}
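
/* Register an RDMA user: allocate a DPI and report back its doorbell
 * addresses (both the ioremapped virtual address and the physical BAR
 * address) together with the DPI size.
 */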
static int qed_rdma_add_user(void *rdma_cxt,
                             struct qed_rdma_add_user_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        u32 dpi_start_offset;
        u32 returned_id = 0;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

        /* Allocate DPI */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
                                    &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        out_params->dpi = (u16)returned_id;

        /* Calculate the corresponding DPI address */
        dpi_start_offset = p_hwfn->dpi_start_offset;

        out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
                                     dpi_start_offset +
                                     ((out_params->dpi) * p_hwfn->dpi_size));

        out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
                                    dpi_start_offset +
                                    ((out_params->dpi) * p_hwfn->dpi_size);

        out_params->dpi_size = p_hwfn->dpi_size;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
        return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

        /* Link may have changed */
        p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
                             QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

        p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

        return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

        /* Return struct with device parameters */
        return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
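
/* Publish a CNQ producer value for the given queue zone into USTORM RAM;
 * the wmb() keeps producer updates ordered.
 */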
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
        struct qed_hwfn *p_hwfn;
        u16 qz_num;
        u32 addr;

        p_hwfn = (struct qed_hwfn *)rdma_cxt;
        qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
        addr = GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
                                  struct qed_dev_rdma_info *info)
{
        memset(info, 0, sizeof(*info));

        info->rdma_type = QED_RDMA_TYPE_ROCE;

        qed_fill_dev_info(cdev, &info->common);

        return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
        int feat_num;

        if (cdev->num_hwfns > 1)
                feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
        else
                feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
                           cdev->num_hwfns;

        return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
        int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
        int n_msix = cdev->int_params.rdma_msix_cnt;

        return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
                DP_ERR(cdev,
                       "qed roce supports only MSI-X interrupts (detected %d).\n",
                       cdev->int_params.out.int_mode);
                return -EINVAL;
        } else if (cdev->int_params.fp_msix_cnt) {
                limit = cdev->int_params.rdma_msix_cnt;
        }

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(*info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.rdma_msix_base;

                info->msix_cnt = cdev->int_params.rdma_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];

                DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
                           info->msix_cnt, msix_base);
        }

        return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        u32 returned_id;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

        /* Allocates an unused protection domain */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->pd_map, &returned_id);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        *pd = (u16)returned_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
        return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

        /* Returns a previously allocated protection domain for reuse */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
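
/* Helper for CQ creation/resize: every CQ icid owns a toggle bit that is
 * flipped on each create or resize, and the new value is carried in the
 * corresponding ramrod (see qed_rdma_create_cq() below).
 */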
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
        struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
        enum qed_rdma_toggle_bit toggle_bit;
        u32 bmap_id;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

        /* Toggle the bit that is related to the given icid and return the
         * new value of the toggle bit.
         */
        bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

        spin_lock_bh(&p_info->lock);
        toggle_bit = !test_and_change_bit(bmap_id,
                                          p_info->toggle_bits.bitmap);
        spin_unlock_bh(&p_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
                   toggle_bit);

        return toggle_bit;
}
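
/* Allocate a CQ icid, make sure its ILT page is backed, and post the
 * RDMA_RAMROD_CREATE_CQ ramrod carrying the CQ handle, DPI, PBL and CNQ
 * parameters. On failure the icid is returned to the cq_map bitmap.
 */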
static int qed_rdma_create_cq(void *rdma_cxt,
                              struct qed_rdma_create_cq_in_params *params,
                              u16 *icid)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
        struct rdma_create_cq_ramrod_data *p_ramrod;
        enum qed_rdma_toggle_bit toggle_bit;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u32 returned_id, start_cid;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
                   params->cq_handle_hi, params->cq_handle_lo);

        /* Allocate icid */
        spin_lock_bh(&p_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_info->cq_map, &returned_id);
        spin_unlock_bh(&p_info->lock);
        if (rc) {
                DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
                return rc;
        }

        start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
                                                p_info->proto);
        *icid = returned_id + start_cid;

        /* Check if icid requires a page allocation */
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
        if (rc)
                goto err;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = *icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Send create CQ ramrod */
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_CREATE_CQ,
                                 p_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_create_cq;

        p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
        p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
        p_ramrod->dpi = cpu_to_le16(params->dpi);
        p_ramrod->is_two_level_pbl = params->pbl_two_level;
        p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
        p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
        p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
                           params->cnq_id;
        p_ramrod->int_timeout = params->int_timeout;

        /* toggle the bit for every resize or create cq for a given icid */
        toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

        p_ramrod->toggle_bit = toggle_bit;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc) {
                /* restore toggle bit */
                qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
                goto err;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
        return rc;

err:
        /* release allocated icid */
        spin_lock_bh(&p_info->lock);
        qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
        spin_unlock_bh(&p_info->lock);

        DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

        return rc;
}
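
/* Destroy a CQ: post RDMA_RAMROD_DESTROY_CQ with a DMA-coherent output
 * buffer, read back the number of outstanding CQ notifications and release
 * the icid to the cq_map bitmap.
 */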
static int
qed_rdma_destroy_cq(void *rdma_cxt,
                    struct qed_rdma_destroy_cq_in_params *in_params,
                    struct qed_rdma_destroy_cq_out_params *out_params)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
        struct rdma_destroy_cq_output_params *p_ramrod_res;
        struct rdma_destroy_cq_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

        p_ramrod_res =
            (struct rdma_destroy_cq_output_params *)
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(struct rdma_destroy_cq_output_params),
                               &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed destroy cq failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = in_params->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        /* Send destroy CQ ramrod */
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 RDMA_RAMROD_DESTROY_CQ,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_destroy_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        /* Free icid */
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        qed_bmap_release_id(p_hwfn,
                            &p_hwfn->p_rdma_info->cq_map,
                            (in_params->icid -
                             qed_cxt_get_proto_cid_start(p_hwfn,
                                                         p_hwfn->p_rdma_info->proto)));

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
        return rc;

err:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct rdma_destroy_cq_output_params),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
        p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
        p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
        p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}
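
/* Copy the source/destination GIDs into a ramrod in little-endian form.
 * For RoCEv2 over IPv4 only the highest dword carries the address and the
 * rest must be zero; otherwise the full 128-bit GID is copied as-is.
 */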
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
                               __le32 *dst_gid)
{
        u32 i;

        if (qp->roce_mode == ROCE_V2_IPV4) {
                /* The IPv4 addresses shall be aligned to the highest word.
                 * The lower words must be zero.
                 */
                memset(src_gid, 0, sizeof(union qed_gid));
                memset(dst_gid, 0, sizeof(union qed_gid));
                src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
                dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
        } else {
                /* GIDs and IPv6 addresses coincide in location and size */
                for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
                        src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
                        dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
                }
        }
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
        enum roce_flavor flavor;

        switch (roce_mode) {
        case ROCE_V1:
                flavor = PLAIN_ROCE;
                break;
        case ROCE_V2_IPV4:
                flavor = RROCE_IPV4;
                break;
        case ROCE_V2_IPV6:
                flavor = RROCE_IPV6;
                break;
        default:
                flavor = MAX_ROCE_FLAVOR;
                break;
        }
        return flavor;
}
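
/* A QP consumes two adjacent icids, one for the responder and one for the
 * requester. Allocate both from the cid_map, verify they are consecutive,
 * make sure their ILT pages are backed, and return the responder icid as
 * the QP cid.
 */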
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
        u32 responder_icid;
        u32 requester_icid;
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
                                    &responder_icid);
        if (rc) {
                spin_unlock_bh(&p_rdma_info->lock);
                return rc;
        }

        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
                                    &requester_icid);

        spin_unlock_bh(&p_rdma_info->lock);
        if (rc)
                goto err;

        /* the two icid's should be adjacent */
        if ((requester_icid - responder_icid) != 1) {
                DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
                rc = -EINVAL;
                goto err;
        }

        responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
                                                      p_rdma_info->proto);
        requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
                                                      p_rdma_info->proto);

        /* If these icids require a new ILT line allocate DMA-able context for
         * an ILT page
         */
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
        if (rc)
                goto err;

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
        if (rc)
                goto err;

        *cid = (u16)responder_icid;
        return rc;

err:
        spin_lock_bh(&p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
        qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

        spin_unlock_bh(&p_rdma_info->lock);
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "Allocate CID - failed, rc = %d\n", rc);
        return rc;
}
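
/* Offload the responder side of a QP to the firmware: allocate the
 * DMA-able IRQ ring and post ROCE_RAMROD_CREATE_QP on the QP's icid with
 * the responder attributes (GIDs, MACs, MTU, PSN, PBLs, QM physical queue,
 * etc.). The requester side is created separately on icid + 1.
 */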
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
{
        struct roce_create_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        union qed_qm_pq_params qm_params;
        enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
        u16 physical_queue0 = 0;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* Allocate DMA-able memory for IRQ */
        qp->irq_num_pages = 1;
        qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     RDMA_RING_PAGE_SIZE,
                                     &qp->irq_phys_addr, GFP_KERNEL);
        if (!qp->irq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
                          rc);
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

        p_ramrod->flags = 0;

        roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                  qp->min_rnr_nak_timer);

        p_ramrod->max_ird = qp->max_rd_atomic_resp;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->irq_num_pages = qp->irq_num_pages;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
        DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
        p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
        p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                       qp->rq_cq_id);

        memset(&qm_params, 0, sizeof(qm_params));
        qm_params.roce.qpid = qp->icid >> 1;
        physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

        p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

        p_ramrod->udp_src_port = qp->udp_src_port;
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
                   rc, physical_queue0);

        if (rc)
                goto err;

        qp->resp_offloaded = true;

        return rc;

err:
        DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->irq, qp->irq_phys_addr);

        return rc;
}

static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
{
        struct roce_create_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        union qed_qm_pq_params qm_params;
        enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
        u16 physical_queue0 = 0;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* Allocate DMA-able memory for ORQ */
        qp->orq_num_pages = 1;
        qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     RDMA_RING_PAGE_SIZE,
                                     &qp->orq_phys_addr, GFP_KERNEL);
        if (!qp->orq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
                          rc);
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_RAMROD_CREATE_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.roce_create_qp_req;

        p_ramrod->flags = 0;

        roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);

        p_ramrod->max_ord = qp->max_rd_atomic_req;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->orq_num_pages = qp->orq_num_pages;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
        p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
        DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
        p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
        p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                       qp->sq_cq_id);

        memset(&qm_params, 0, sizeof(qm_params));
        qm_params.roce.qpid = qp->icid >> 1;
        physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

        p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

        p_ramrod->udp_src_port = qp->udp_src_port;
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);

        if (rc)
                goto err;

        qp->req_offloaded = true;

        return rc;

err:
        DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->orq, qp->orq_phys_addr);
        return rc;
}
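
/* Modify the responder side of an already-offloaded QP. Which attributes
 * the firmware should apply is signalled through the *_FLG bits derived
 * from the caller's modify_flags mask; moving to the error state is a
 * no-op if the responder was never offloaded.
 */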
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);

	return rc;
}

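/* Issue a MODIFY_QP ramrod for the requester side of the QP (cid = icid + 1),
 * optionally moving it to the SQD or error state. A move to error is silently
 * skipped when the requester has not been offloaded to the firmware yet.
 */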
static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);

	return rc;
}

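/* Destroy the responder side of the QP. On success the IRQ ring is freed and
 * the number of invalidated memory windows is reported back to the caller.
 */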
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_invalidated_mw)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

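/* Destroy the requester side of the QP. On success the ORQ ring is freed and
 * the number of bound memory windows is reported back to the caller.
 */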
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_bound_mw)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

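/* Query the firmware for the current QP state. The responder side is queried
 * for the RQ PSN and error flag; if the requester is offloaded it is queried
 * as well for the SQ PSN, error and drain flags.
 */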
static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to the fw yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
			   dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_SQE;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

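/* Tear down both sides of an offloaded QP and release the responder and
 * requester ICIDs back to the CID bitmap. Only QPs in RESET, INIT or ERROR
 * state may be destroyed.
 */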
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	u32 start_cid;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);
		if (rc)
			return rc;

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidated memory windows is different from the number of bound ones\n");
			return -EINVAL;
		}

		spin_lock_bh(&p_rdma_info->lock);

		start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
							p_rdma_info->proto);

		/* Release responder's icid */
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
				    qp->icid - start_cid);

		/* Release requester's icid */
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
				    qp->icid + 1 - start_cid);

		spin_unlock_bh(&p_rdma_info->lock);
	}

	return 0;
}

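/* rdma_query_qp callback: fill the output parameters from the driver's copy
 * of the QP attributes, then query the firmware for the state and PSNs.
 */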
static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}

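/* rdma_create_qp callback: allocate a qed_rdma_qp, reserve a pair of ICIDs
 * (responder and requester) and initialize the QP from the input parameters.
 * No ramrod is sent here; the create ramrods are issued later from
 * qed_roce_modify_qp() when the QP leaves the RESET state.
 */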
static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
		return NULL;
	}

	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
	qp->qpid = ((0xFF << 16) | qp->icid);

	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);

	if (rc) {
		kfree(qp);
		return NULL;
	}

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

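/* Send the ramrods implied by the state transition: create the responder on
 * INIT/RESET -> RTR, create the requester on RTR -> RTS, modify one or both
 * sides on the remaining transitions, and destroy both sides on a move to
 * RESET.
 */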
static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
			      struct qed_rdma_qp *qp,
			      enum qed_roce_qp_state prev_state,
			      struct qed_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR-> RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
		   qp->cur_state == QED_ROCE_QP_STATE_SQE) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw);
		if (rc)
			return rc;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidated memory windows is different from the number of bound ones\n");
			return -EINVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	}

	return rc;
}

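/* rdma_modify_qp callback: update the driver's copy of the QP attributes
 * according to modify_flags, then let qed_roce_modify_qp() issue the ramrods
 * required by the state transition.
 */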
static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;

	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}

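/* rdma_register_tid callback: post a REGISTER_MR ramrod describing the TID
 * (access rights, page sizes, PBL base address and VA or FBO) and check the
 * firmware return code.
 */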
static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
		  p_hwfn->p_rdma_info->last_tid);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case QED_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;	/* fw_return_code is only valid on success */

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

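/* rdma_deregister_tid callback: post a DEREGISTER_MR ramrod. If the firmware
 * reports that a NIG drain is required, drain through the MCP and resend the
 * ramrod once.
 */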
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_LEADING_HWFN(cdev);
}

static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

static int qed_rdma_start(void *rdma_cxt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_ptt *p_ptt;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
	if (rc)
		goto err1;

	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;

err2:
	qed_rdma_free(p_hwfn);
err1:
	qed_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
			 struct qed_rdma_start_in_params *params)
{
	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}

static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

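/* LL2 completion callbacks for GSI packets: forward TX and RX completions to
 * the roce_ll2 callbacks registered by the upper layer.
 */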
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_roce_ll2_packet *packet = cookie;
	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;

	roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
}

void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				    u8 connection_handle,
				    void *cookie,
				    dma_addr_t first_frag_addr,
				    bool b_last_fragment, bool b_last_packet)
{
	qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
					cookie, first_frag_addr,
					b_last_fragment, b_last_packet);
}

void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t rx_buf_addr,
				     u16 data_length,
				     u8 data_length_error,
				     u16 parse_flags,
				     u16 vlan,
				     u32 src_mac_addr_hi,
				     u16 src_mac_addr_lo, bool b_last_packet)
{
	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
	struct qed_roce_ll2_rx_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_roce_ll2_packet pkt;

	DP_VERBOSE(cdev,
		   QED_MSG_LL2,
		   "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
		   (void *)(uintptr_t)rx_buf_addr,
		   data_length, data_length_error);

	memset(&pkt, 0, sizeof(pkt));
	pkt.n_seg = 1;
	pkt.payload[0].baddr = rx_buf_addr;
	pkt.payload[0].len = data_length;

	memset(&params, 0, sizeof(params));
	params.vlan_id = vlan;
	*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
	*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);

	if (data_length_error) {
		DP_ERR(cdev,
		       "roce ll2 rx complete: data length error %d, length=%d\n",
		       data_length_error, data_length);
		params.rc = -EINVAL;
	}

	roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
}

static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
				       u8 *old_mac_address,
				       u8 *new_mac_address)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
		DP_ERR(cdev,
		       "qed roce mac filter failed - roce_info/ll2 NULL\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to acquire PTT\n");
		return -EINVAL;
	}

	mutex_lock(&hwfn->ll2->lock);
	if (old_mac_address)
		qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
					  old_mac_address);
	if (new_mac_address)
		rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
					    new_mac_address);
	mutex_unlock(&hwfn->ll2->lock);

	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to add mac filter\n");

	return rc;
}

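/* Bring up the RoCE LL2 connection used for GSI traffic: allocate the
 * roce_ll2 context, acquire and establish the LL2 connection and install the
 * MAC filter.
 */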
static int qed_roce_ll2_start(struct qed_dev *cdev,
			      struct qed_roce_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2;
	struct qed_ll2_conn ll2_params;
	int rc;

	if (!params) {
		DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
		return -EINVAL;
	}
	if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
		       params->cbs.tx_cb, params->cbs.rx_cb);
		return -EINVAL;
	}
	if (!is_valid_ether_addr(params->mac_address)) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
		       params->mac_address);
		return -EINVAL;
	}

	/* Initialize */
	roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
	if (!roce_ll2) {
		DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
		return -ENOMEM;
	}
	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
	roce_ll2->cbs = params->cbs;
	roce_ll2->cb_cookie = params->cb_cookie;
	mutex_init(&roce_ll2->lock);

	memset(&ll2_params, 0, sizeof(ll2_params));
	ll2_params.conn_type = QED_LL2_TYPE_ROCE;
	ll2_params.mtu = params->mtu;
	ll2_params.rx_drop_ttl0_flg = true;
	ll2_params.rx_vlan_removal_en = false;
	ll2_params.tx_dest = CORE_TX_DEST_NW;
	ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
	ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
	ll2_params.gsi_enable = true;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
					params->max_rx_buffers,
					params->max_tx_buffers,
					&roce_ll2->handle);
	if (rc) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
		       rc);
		goto err;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  roce_ll2->handle);
	if (rc) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
		       rc);
		goto err1;
	}

	hwfn->ll2 = roce_ll2;

	rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
	if (rc) {
		hwfn->ll2 = NULL;
		goto err2;
	}
	ether_addr_copy(roce_ll2->mac_address, params->mac_address);

	return 0;

err2:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err1:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err:
	kfree(roce_ll2);
	return rc;
}

static int qed_roce_ll2_stop(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
	int rc;

	if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
		DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
		return -EINVAL;
	}

	/* remove LL2 MAC address filter */
	rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
	eth_zero_addr(roce_ll2->mac_address);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  roce_ll2->handle);
	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
		       rc);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);

	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;

	kfree(roce_ll2);

	return rc;
}

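/* Transmit a GSI packet over the RoCE LL2 connection: the header is posted as
 * the first BD and each payload segment is added as a fragment.
 */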
static int qed_roce_ll2_tx(struct qed_dev *cdev,
			   struct qed_roce_ll2_packet *pkt,
			   struct qed_roce_ll2_tx_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
	enum qed_ll2_roce_flavor_type qed_roce_flavor;
	u8 flags = 0;
	int rc;
	int i;

	if (!pkt || !params) {
		DP_ERR(cdev,
		       "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
		       cdev, pkt, params);
		return -EINVAL;
	}

	qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
						      : QED_LL2_RROCE;

	if (pkt->roce_mode == ROCE_V2_IPV4)
		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

	/* Tx header */
	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
				       1 + pkt->n_seg, 0, flags, 0,
				       QED_LL2_TX_DEST_NW,
				       qed_roce_flavor, pkt->header.baddr,
				       pkt->header.len, pkt, 1);
	if (rc) {
		DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
		return QED_ROCE_TX_HEAD_FAILURE;
	}

	/* Tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       roce_ll2->handle,
						       pkt->payload[i].baddr,
						       pkt->payload[i].len);
		if (rc) {
			/* If failed not much to do here, partial packet has
			 * been posted; we can't free memory, will need to wait
			 * for completion
			 */
			DP_ERR(cdev,
			       "roce ll2 tx: payload failed (rc=%d)\n", rc);
			return QED_ROCE_TX_FRAG_FAILURE;
		}
	}

	return 0;
}

static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
				       struct qed_roce_ll2_buffer *buf,
				       u64 cookie, u8 notify_fw)
{
	return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
				      QED_LEADING_HWFN(cdev)->ll2->handle,
				      buf->baddr, buf->len,
				      (void *)(uintptr_t)cookie, notify_fw);
}

static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 roce_ll2->handle, stats);
}

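/* RDMA operations exported to the upper-layer RDMA driver through
 * qed_get_rdma_ops().
 */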
static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_rdma_dev_info,
	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
	.rdma_init = &qed_rdma_init,
	.rdma_add_user = &qed_rdma_add_user,
	.rdma_remove_user = &qed_rdma_remove_user,
	.rdma_stop = &qed_rdma_stop,
	.rdma_query_port = &qed_rdma_query_port,
	.rdma_query_device = &qed_rdma_query_device,
	.rdma_get_start_sb = &qed_rdma_get_sb_start,
	.rdma_get_rdma_int = &qed_rdma_get_int,
	.rdma_set_rdma_int = &qed_rdma_set_int,
	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
	.rdma_alloc_pd = &qed_rdma_alloc_pd,
	.rdma_dealloc_pd = &qed_rdma_free_pd,
	.rdma_create_cq = &qed_rdma_create_cq,
	.rdma_destroy_cq = &qed_rdma_destroy_cq,
	.rdma_create_qp = &qed_rdma_create_qp,
	.rdma_modify_qp = &qed_rdma_modify_qp,
	.rdma_query_qp = &qed_rdma_query_qp,
	.rdma_destroy_qp = &qed_rdma_destroy_qp,
	.rdma_alloc_tid = &qed_rdma_alloc_tid,
	.rdma_free_tid = &qed_rdma_free_tid,
	.rdma_register_tid = &qed_rdma_register_tid,
	.rdma_deregister_tid = &qed_rdma_deregister_tid,
	.roce_ll2_start = &qed_roce_ll2_start,
	.roce_ll2_stop = &qed_roce_ll2_stop,
	.roce_ll2_tx = &qed_roce_ll2_tx,
	.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
	.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
	.roce_ll2_stats = &qed_roce_ll2_stats,
};

const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
	return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);