qed_iwarp.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

#define QED_IWARP_ORD_DEFAULT	32
#define QED_IWARP_IRD_DEFAULT	32
#define QED_IWARP_MAX_FW_MSS	4120

#define QED_EP_SIG 0xecabcdef

struct mpa_v2_hdr {
	__be16 ird;
	__be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL	0x8000
#define MPA_V2_SEND_RTR		0x4000	/* on ird */
#define MPA_V2_READ_RTR		0x4000	/* on ord */
#define MPA_V2_WRITE_RTR	0x8000
#define MPA_V2_IRD_ORD_MASK	0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID	0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
#define TIMESTAMP_HEADER_SIZE		(12)

#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_DA_EN			BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps) -
		      QED_IWARP_PREALLOC_CNT;

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}
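
/* Enable TCP searching in the parser (PRS) so that iWARP traffic, which
 * runs over TCP, is steered to the RDMA engine.
 */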
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}

/* We have two cid maps: one for tcp, which should be used only for passive
 * syn processing and for replacing a pre-allocated ep in the list; the
 * second for active tcp connections and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	if (cid < QED_IWARP_PREALLOC_CNT)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				    cid);
	else
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
		return rc;
	}
	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}

static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that these
 * cids are assured to already have ILT memory allocated: they are
 * preallocated so that no memory allocation is needed during syn processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "can't allocate iwarp tcp cid max-count=%d\n",
			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
		*cid = QED_IWARP_INVALID_TCP_CID;
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
					    p_hwfn->p_rdma_info->proto);
	return 0;
}
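
/* Allocate the shared queue page and a cid for the QP, then post a
 * CREATE_QP ramrod carrying the SQ/RQ PBL addresses, CQ ids and the
 * physical queues to firmware.
 */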
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;
	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = qp->pd;
	p_ramrod->sq_num_pages = qp->sq_num_pages;
	p_ramrod->rq_num_pages = qp->rq_num_pages;

	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
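
/* Post a MODIFY_QP ramrod; the only transitions firmware needs to be told
 * about explicitly are CLOSING and ERROR.
 */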
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
		  0x1);
	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}
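
/* Translation helpers between the RoCE and iWARP QP state enums, for code
 * paths shared with the RoCE flavor of the driver.
 */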
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}

const char *iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};

int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* Modify QP can be called from the upper layer or as a result of an
	 * async RST/FIN, therefore we need to protect against concurrency.
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* Could happen due to a race... do nothing. */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? "internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}

int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	if (remove_from_active_list) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}

int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_iwarp_ep *ep = qp->ep;
	int wait_count = 0;
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	/* Make sure ep is closed before returning and freeing memory. */
	if (ep) {
		while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
			msleep(100);

		if (ep->state != QED_IWARP_EP_CLOSED)
			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
				  ep->state);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
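
/* Allocate an ep (endpoint / connection element) along with the
 * DMA-coherent buffer used to exchange private data and async output
 * with firmware.
 */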
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
	struct qed_iwarp_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->state = QED_IWARP_EP_INIT;

	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(*ep->ep_buffer_virt),
						&ep->ep_buffer_phys,
						GFP_KERNEL);
	if (!ep->ep_buffer_virt) {
		rc = -ENOMEM;
		goto err;
	}

	ep->sig = QED_EP_SIG;

	*ep_out = ep;

	return 0;

err:
	kfree(ep);
	return rc;
}

static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}
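
/* Build and post the TCP_OFFLOAD ramrod handing the TCP connection
 * (MACs, IPs, ports, mss, window scale) to firmware. Passive connections
 * complete via callback, active ones block until completion.
 */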
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
	struct tcp_offload_params_opt2 *tcp;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t async_output_phys;
	dma_addr_t in_pdata_phys;
	u16 physical_q;
	u8 tcp_flags;
	int rc;
	int i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = ep->tcp_cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
		       in_pdata_phys);

	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
	tcp->flags = 0;
	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->ip_version = ep->cm_info.ip_version;

	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
		    cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}
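
/* Passive side: an MPA request arrived. Parse the optional MPA v2 header
 * for ord/ird and RTR negotiation, then raise a QED_IWARP_EVENT_MPA_REQUEST
 * event to the upper layer.
 */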
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u8 mpa_rev;

	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store the requested incoming ord/ird in
		 * cm_info; they are replaced with the negotiated values
		 * during accept.
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord & MPA_V2_WRITE_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

			if (mpa_ord & MPA_V2_READ_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

			if (mpa_ird & MPA_V2_SEND_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

			ep->rtr_type &= iwarp_info->rtr_type;

			/* if we're left with no match send our capabilities */
			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
				ep->rtr_type = iwarp_info->rtr_type;
		}

		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	} else {
		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

	/* Strip mpa v2 hdr from private data before sending to upper layer */
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
	    mpa_hdr_size;

	params.event = QED_IWARP_EVENT_MPA_REQUEST;
	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
	ep->event_cb(ep->cb_context, &params);
}
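
/* Post the MPA_OFFLOAD ramrod that starts the MPA handshake, or rejects
 * the connection when the ep has no QP associated with it.
 */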
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t async_output_phys;
	struct qed_spq_entry *p_ent;
	dma_addr_t out_pdata_phys;
	dma_addr_t in_pdata_phys;
	struct qed_rdma_qp *qp;
	bool reject;
	int rc;

	if (!ep)
		return -EINVAL;

	qp = ep->qp;
	reject = !qp;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = reject ? ep->tcp_cid : qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
	out_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, out_pdata);
	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
		       out_pdata_phys);
	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
	    ep->cm_info.private_data_len;
	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;

	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
		       in_pdata_phys);
	p_mpa_ramrod->incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
		       async_output_phys);
	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	if (!reject) {
		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
			       qp->shared_queue_phys_addr);
		p_mpa_ramrod->stats_counter_id =
		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
	} else {
		p_mpa_ramrod->common.reject = 1;
	}

	p_mpa_ramrod->mode = ep->mpa_rev;
	SET_FIELD(p_mpa_ramrod->rtr_pref,
		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!reject)
		ep->cid = qp->icid;	/* Now they're migrated. */

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
		   reject ? 0xffff : qp->icid,
		   ep->tcp_cid,
		   rc,
		   ep->cm_info.ird,
		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);

	return rc;
}
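
/* Reset an ep and move it back to the free list so it can serve a future
 * passive connection.
 */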
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	ep->state = QED_IWARP_EP_INIT;
	if (ep->qp)
		ep->qp->ep = NULL;
	ep->qp = NULL;
	memset(&ep->cm_info, 0, sizeof(ep->cm_info));

	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		/* We don't care about the return code; it's ok if tcp_cid
		 * remains invalid... in that case we'll defer allocation.
		 */
		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
	}

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	list_del(&ep->list_entry);
	list_add_tail(&ep->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.ep_free_list);

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}

void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct mpa_v2_hdr *mpa_v2_params;
	union async_output *async_data;
	u16 mpa_ird, mpa_ord;
	u8 mpa_data_size = 0;

	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
		mpa_data_size = sizeof(*mpa_v2_params);
		mpa_ird = ntohs(mpa_v2_params->ird);
		mpa_ord = ntohs(mpa_v2_params->ord);

		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
	}

	async_data = &ep->ep_buffer_virt->async_output;

	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
	    mpa_data_size;
}

void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		DP_NOTICE(p_hwfn,
			  "MPA reply event not expected on passive side!\n");
		return;
	}

	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

	qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->mpa_reply_processed = true;

	ep->event_cb(ep->cb_context, &params);
}

#define QED_IWARP_CONNECT_MODE_STRING(ep) \
	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	else
		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
		qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;

	params.ep_context = ep;

	ep->state = QED_IWARP_EP_CLOSED;

	switch (fw_return_code) {
	case RDMA_RETURN_OK:
		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
		ep->state = QED_IWARP_EP_ESTABLISHED;
		params.status = 0;
		break;
	case IWARP_CONN_ERROR_MPA_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RST:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
			  ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_MPA_FIN:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_TERMINATE:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	default:
		params.status = -ECONNRESET;
		break;
	}

	ep->event_cb(ep->cb_context, &params);

	/* On the passive side, if there is no associated QP (REJECT) we need
	 * to return the ep to the pool; in the regular case an element is
	 * added back to the free list in accept instead. In both cases the
	 * ep must be removed from the ep_list.
	 */
	if (fw_return_code != RDMA_RETURN_OK) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
		    (!ep->qp)) {	/* Rejected */
			qed_iwarp_return_ep(p_hwfn, ep);
		} else {
			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			list_del(&ep->list_entry);
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		}
	}
}
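
/* Write the MPA v2 header (ird/ord plus the peer2peer/RTR bits) into the
 * outgoing private data buffer; for basic MPA rev1 no header is added.
 */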
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
	struct mpa_v2_hdr *mpa_v2_params;
	u16 mpa_ird, mpa_ord;

	*mpa_data_size = 0;
	if (MPA_REV2(ep->mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
		*mpa_data_size = sizeof(*mpa_v2_params);

		mpa_ird = (u16)ep->cm_info.ird;
		mpa_ord = (u16)ep->cm_info.ord;

		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
			mpa_ird |= MPA_V2_PEER2PEER_MODEL;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
				mpa_ird |= MPA_V2_SEND_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
				mpa_ord |= MPA_V2_WRITE_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
				mpa_ord |= MPA_V2_READ_RTR;
		}

		mpa_v2_params->ird = htons(mpa_ird);
		mpa_v2_params->ord = htons(mpa_ord);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
			   mpa_v2_params->ird,
			   mpa_v2_params->ord,
			   *((u32 *)mpa_v2_params),
			   mpa_ord & MPA_V2_IRD_ORD_MASK,
			   mpa_ird & MPA_V2_IRD_ORD_MASK,
			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
			   !!(mpa_ird & MPA_V2_SEND_RTR),
			   !!(mpa_ord & MPA_V2_WRITE_RTR),
			   !!(mpa_ord & MPA_V2_READ_RTR));
	}
}
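
/* Active-side connection establishment: allocate a cid and an ep, fill in
 * the connection parameters and private data, then kick off the TCP
 * offload. The MPA stage is driven later from the async event path.
 */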
int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_info *iwarp_info;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	u8 ts_hdr_size = 0;
	u32 cid;
	int rc;

	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
		DP_NOTICE(p_hwfn,
			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			  iparams->qp->icid, iparams->cm_info.ord,
			  iparams->cm_info.ird);
		return -EINVAL;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	/* Allocate ep object */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		return rc;

	rc = qed_iwarp_create_ep(p_hwfn, &ep);
	if (rc)
		goto err;

	ep->tcp_cid = cid;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->qp = iparams->qp;
	ep->qp->ep = ep;
	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

	ep->cm_info.ord = iparams->cm_info.ord;
	ep->cm_info.ird = iparams->cm_info.ird;

	ep->rtr_type = iwarp_info->rtr_type;
	if (!iwarp_info->peer2peer)
		ep->rtr_type = MPA_RTR_TYPE_NONE;

	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
		ep->cm_info.ord = 1;

	ep->mpa_rev = iwarp_info->mpa_rev;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->cm_info.private_data,
	       iparams->cm_info.private_data_len);

	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
		ts_hdr_size = TIMESTAMP_HEADER_SIZE;

	ep->mss = iparams->mss - ts_hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = iparams->event_cb;
	ep->cb_context = iparams->cb_context;
	ep->connect_mode = TCP_CONNECT_ACTIVE;

	oparams->ep_context = ep;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
		   iparams->qp->icid, ep->tcp_cid, rc);

	if (rc) {
		qed_iwarp_destroy_ep(p_hwfn, ep, true);
		goto err;
	}

	return rc;
err:
	qed_iwarp_cid_cleaned(p_hwfn, cid);

	return rc;
}
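
/* Take a pre-allocated ep from the free list (used on the passive syn
 * path, which must not allocate memory), retrying the tcp cid allocation
 * if a previous attempt left the ep without one.
 */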
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep = NULL;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		DP_ERR(p_hwfn, "Ep list is empty\n");
		goto out;
	}

	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			      struct qed_iwarp_ep, list_entry);

	/* In some cases we could have failed allocating a tcp cid when the
	 * ep was added from accept / failure... retry now; this is not the
	 * common case.
	 */
	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

		/* if we fail we could look for another entry with a valid
		 * tcp_cid, but since we don't expect to reach this anyway
		 * it's not worth the handling
		 */
		if (rc) {
			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
			ep = NULL;
			goto out;
		}
	}

	list_del(&ep->list_entry);

out:
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	return ep;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared; as long as
 * there is progress (i.e. the number of bits left to be cleared decreases)
 * it keeps waiting.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	int rc;
	int i;

	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
					    &p_hwfn->p_rdma_info->tcp_cid_map);
	if (rc)
		return rc;

	/* Now free the tcp cids from the main cid map */
	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}

static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep;

	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
				      struct qed_iwarp_ep, list_entry);

		if (!ep) {
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			break;
		}
		list_del(&ep->list_entry);

		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}
}
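
/* Pre-allocate eps and their tcp cids for the passive connection path:
 * QED_IWARP_PREALLOC_CNT of them at init time, then one at a time to
 * replace each ep taken from the free list.
 */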
static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	u32 cid;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool,
		 * afterwards we allocate only from the tcp_cid map.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code; it's ok if
			 * tcp_cid remains invalid... in that case we'll
			 * defer allocation.
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}

int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate bitmap for tcp cids. These are used by the passive side
	 * to ensure it can allocate, during DPC, a tcp cid that was
	 * pre-acquired and doesn't require dynamic ILT allocation.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate tcp cid, rc = %d\n", rc);
		return rc;
	}

	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	return qed_iwarp_prealloc_ep(p_hwfn, true);
}

void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
}
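
/* Called by the upper layer to accept an incoming MPA request: negotiate
 * the final ord/ird values, append the private data and post the MPA
 * offload ramrod.
 */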
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   iparams->qp->icid, ep->tcp_cid);

	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			   iparams->qp->icid,
			   ep->tcp_cid, iparams->ord, iparams->ird);
		return -EINVAL;
	}

	qed_iwarp_prealloc_ep(p_hwfn, false);

	ep->cb_context = iparams->cb_context;
	ep->qp = iparams->qp;
	ep->qp->ep = ep;

	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Negotiate ord/ird: if the upper layer requested an ord
		 * larger than the ird advertised by the remote, we need to
		 * decrease our ord.
		 */
		if (iparams->ord > ep->cm_info.ird)
			iparams->ord = ep->cm_info.ird;

		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
		    (iparams->ird == 0))
			iparams->ird = 1;
	}

	/* Update cm_info ord/ird to be negotiated values */
	ep->cm_info.ord = iparams->ord;
	ep->cm_info.ird = iparams->ird;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
	if (rc)
		qed_iwarp_modify_qp(p_hwfn,
				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);

	return rc;
}

int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);

	ep->cb_context = iparams->cb_context;
	ep->qp = NULL;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	return qed_iwarp_mpa_offload(p_hwfn, ep);
}


static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
		   cm_info->ip_version);

	if (cm_info->ip_version == QED_TCP_IPV4)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len = %x ord = %d, ird = %d\n",
		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
}
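
/* Repost an rx buffer to the ll2 connection identified by @handle. If the
 * post fails the buffer cannot be reused, so its DMA memory and descriptor
 * are freed here.
 */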
static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
			  rc, handle);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
				  buf->data, buf->data_phys_addr);
		kfree(buf);
	}

	return rc;
}
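
/* Check whether an EP already exists for the incoming 4-tuple + vlan; a
 * match means the SYN belongs to a connection that is already being
 * offloaded (e.g. a SYN retransmission) and must be dropped.
 */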
static bool
qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_ep *ep = NULL;
	bool found = false;

	list_for_each_entry(ep,
			    &p_hwfn->p_rdma_info->iwarp.ep_list,
			    list_entry) {
		if ((ep->cm_info.local_port == cm_info->local_port) &&
		    (ep->cm_info.remote_port == cm_info->remote_port) &&
		    (ep->cm_info.vlan == cm_info->vlan) &&
		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
			    sizeof(cm_info->local_ip)) &&
		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
			    sizeof(cm_info->remote_ip))) {
			found = true;
			break;
		}
	}

	if (found) {
		DP_NOTICE(p_hwfn,
			  "SYN received on active connection - dropping\n");
		qed_iwarp_print_cm_info(p_hwfn, cm_info);

		return true;
	}

	return false;
}
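
/* Find a listener matching cm_info: a listener bound to the wildcard
 * (all-zero) IP matches any local address on its port, otherwise both the
 * local IP and the vlan have to match.
 */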
static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_listener *listener = NULL;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	bool found = false;

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	list_for_each_entry(listener,
			    &p_hwfn->p_rdma_info->iwarp.listen_list,
			    list_entry) {
		if (listener->port == cm_info->local_port) {
			if (!memcmp(listener->ip_addr,
				    ip_zero, sizeof(ip_zero))) {
				found = true;
				break;
			}

			if (!memcmp(listener->ip_addr,
				    cm_info->local_ip,
				    sizeof(cm_info->local_ip)) &&
			    (listener->vlan == cm_info->vlan)) {
				found = true;
				break;
			}
		}
	}

	if (found) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
			   listener);
		return listener;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
	return NULL;
}
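
/* Parse a received SYN frame: walk the Ethernet (optionally 802.1Q
 * tagged), IPv4/IPv6 and TCP headers, fill @cm_info with the connection
 * 4-tuple and return the L4 payload length and the offset at which the
 * TCP header starts.
 */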
static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{
	struct vlan_ethhdr *vethh;
	bool vlan_valid = false;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	struct tcphdr *tcph;
	struct iphdr *iph;
	int eth_hlen;
	int ip_hlen;
	int eth_type;
	int i;

	ethh = buf;
	eth_type = ntohs(ethh->h_proto);
	if (eth_type == ETH_P_8021Q) {
		vlan_valid = true;
		vethh = (struct vlan_ethhdr *)ethh;
		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
	}

	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);

	ether_addr_copy(remote_mac_addr, ethh->h_source);
	ether_addr_copy(local_mac_addr, ethh->h_dest);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
		   eth_type, ethh->h_source);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
		   eth_hlen, ethh->h_dest);

	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);

	if (eth_type == ETH_P_IP) {
		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
		cm_info->ip_version = QED_TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
	} else if (eth_type == ETH_P_IPV6) {
		ip6h = (struct ipv6hdr *)iph;
		for (i = 0; i < 4; i++) {
			cm_info->local_ip[i] =
			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
		cm_info->ip_version = QED_TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);
	} else {
		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
		return -EINVAL;
	}

	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);

	if (!tcph->syn) {
		DP_NOTICE(p_hwfn,
			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
			  iph->ihl, tcph->source, tcph->dest);
		return -EINVAL;
	}

	cm_info->local_port = ntohs(tcph->dest);
	cm_info->remote_port = ntohs(tcph->source);

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	*tcp_start_offset = eth_hlen + ip_hlen;

	return 0;
}
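
/* Rx completion on the dedicated SYN ll2 connection. The SYN is parsed
 * and matched against the listener list; with no listener the frame is
 * sent back out on the loopback tx queue, otherwise a free EP is set up
 * (MAC addresses, cm_info, MSS) and passed to the firmware TCP offload.
 * On any error the rx buffer is simply reposted.
 */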
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	struct qed_iwarp_ep *ep;
	int tcp_start_offset;
	u8 ts_hdr_size = 0;
	u8 ll2_syn_handle;
	int payload_len;
	u32 hdr_size;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = data->vlan;

		if (GET_FIELD(data->parse_flags,
			      PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
			SET_FIELD(tx_pkt.bd_flags,
				  CORE_TX_BD_DATA_VLAN_INSERTION, 1);

		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");

	/* There may be an open ep on this connection if this is a syn
	 * retransmission... need to make sure there isn't one...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
		ts_hdr_size = TIMESTAMP_HEADER_SIZE;

	/* 40 = IPv4 (20) + TCP (20) header len; 60 = IPv6 (40) + TCP (20) */
	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
		   ts_hdr_size;
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}

	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}
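
/* ll2 buffer callbacks: on rx-release the buffer is freed outright; on
 * tx-completion it is reposted to rx, since anything transmitted here
 * started out as an rx buffer; on tx-release it is freed because the
 * frame was never sent.
 */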
static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	/* this was originally an rx packet, post it back */
	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
}

static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)
		return;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}
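
/* Tear down the SYN ll2 connection (terminate + release) and remove the
 * MAC filter that steered SYN packets to it.
 */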
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn,
				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
	return rc;
}
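
/* Allocate @num_rx_bufs DMA-coherent buffers of @buff_size bytes and post
 * them to the given ll2 connection. Buffers that were already posted are
 * owned by qed_ll2 and freed through its release callbacks, so a mid-loop
 * failure only unwinds the buffer currently being set up.
 */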
static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buffer;
	int rc = 0;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			break;
	}
	return rc;
}

#define QED_IWARP_MAX_BUF_SIZE(mtu)					   \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)
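
/* Bring up the dedicated SYN ll2 connection: add a MAC filter for the
 * iWARP MAC address, acquire and establish an ll2 connection of type
 * IWARP whose rx completions land in qed_iwarp_ll2_comp_syn_pkt(), and
 * pre-post its rx buffers.
 */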
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc = 0;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 QED_IWARP_MAX_SYN_PKT_SIZE,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn, p_ptt);

	return rc;
}
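
/* One-time iWARP init at RDMA start: derive the TCP receive window scale
 * from the default window size, select enhanced MPA negotiation with all
 * RTR types allowed, initialize the EP and listener lists and their lock,
 * register the async event handler and start the SYN ll2 connection.
 */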
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_iwarp_info *iwarp_info;
	u32 rcv_wnd_size;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

	/* The scale is relative to the minimum window: a rcv_wnd_scale of 0
	 * corresponds to QED_IWARP_RCV_WND_SIZE_MIN
	 */
	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
			       MPA_RTR_TYPE_ZERO_WRITE |
			       MPA_RTR_TYPE_ZERO_READ;

	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);

	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}

int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	qed_iwarp_free_prealloc_ep(p_hwfn);
	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc)
		return rc;

	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
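
/* Firmware signalled that the QP moved to error for this EP: force the QP
 * into ERROR state, unlink the EP and deliver a CLOSE event whose status
 * reflects whether the LLP closed gracefully.
 */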
void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

	params.event = QED_IWARP_EVENT_CLOSE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
			 0 : -ECONNRESET;

	ep->state = QED_IWARP_EP_CLOSED;
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->event_cb(ep->cb_context, &params);
}
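
/* Translate a firmware exception on an offloaded connection into the
 * corresponding CM event and, when a mapping exists, report it to the
 * upper layer through the EP's event callback.
 */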
void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep, int fw_ret_code)
{
	struct qed_iwarp_cm_event_params params;
	bool event_cb = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
		   ep->cid, fw_ret_code);

	switch (fw_ret_code) {
	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
		params.status = 0;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
		params.status = -ECONNRESET;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
		params.event = QED_IWARP_EVENT_RQ_EMPTY;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
		params.event = QED_IWARP_EVENT_IRQ_FULL;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
		event_cb = true;
		break;
	default:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Unhandled exception received...fw_ret_code=%d\n",
			   fw_ret_code);
		break;
	}

	if (event_cb) {
		params.ep_context = ep;
		params.cm_info = &ep->cm_info;
		ep->event_cb(ep->cb_context, &params);
	}
}
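
/* TCP connection establishment failed: translate the firmware return code
 * into an errno, then either recycle the EP quietly (passive side, where
 * the upper layer never heard of this connection) or deliver
 * ACTIVE_COMPLETE with the error and unlink the EP (active side).
 */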
static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	memset(&params, 0, sizeof(params));
	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	ep->state = QED_IWARP_EP_CLOSED;

	switch (fw_return_code) {
	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP connect got invalid packet\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP Connection Reset\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	default:
		DP_ERR(p_hwfn,
		       "%s(0x%x) Unexpected TCP connect return code: %d\n",
		       QED_IWARP_CONNECT_MODE_STRING(ep),
		       ep->tcp_cid, fw_return_code);
		params.status = -ECONNRESET;
		break;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		qed_iwarp_return_ep(p_hwfn, ep);
	} else {
		ep->event_cb(ep->cb_context, &params);
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}
}
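
/* Async completion of the TCP three-way handshake. On the passive side
 * the saved SYN buffer is first returned to the rx ring; success then
 * proceeds to MPA-request handling (passive) or MPA offload (active),
 * while failure is funneled to qed_iwarp_tcp_connect_unsuccessful().
 */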
void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		/* Done with the SYN packet, post back to ll2 rx */
		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);

		ep->syn = NULL;

		/* If connect failed - upper layer doesn't know about it */
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_received(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	} else {
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_offload(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	}
}

static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	if (!ep || (ep->sig != QED_EP_SIG)) {
		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
		return false;
	}

	return true;
}
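
/* Async (EQ) event dispatcher for PROTOCOLID_IWARP. The firmware echoes
 * back the 64-bit async handle provided at offload time; for
 * connection-scoped events this is the EP pointer (validated against
 * QED_EP_SIG before use), while CID_CLEANED carries a bare CID.
 */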
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code)
{
	struct regpair *fw_handle = &data->rdma_data.async_handle;
	struct qed_iwarp_ep *ep = NULL;
	u16 cid;

	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
						       fw_handle->lo);

	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Async completion after TCP 3-way handshake */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
			   ep->tcp_cid, fw_return_code);
		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* Async completion for Close Connection ramrod */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
		/* Async event for active side only */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
		break;
	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
		cid = (u16)le32_to_cpu(fw_handle->lo);
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
		qed_iwarp_cid_cleaned(p_hwfn, cid);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			QED_IWARP_EVENT_CQ_OVERFLOW,
			(void *)fw_handle);
		break;
	default:
		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
		       fw_event_code);
		return -EINVAL;
	}
	return 0;
}
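
/* Register a software listener for incoming SYNs on the given
 * ip/port/vlan. No ramrod is involved: matching is done per-SYN in
 * qed_iwarp_ll2_comp_syn_pkt() against the listen_list populated here.
 */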
int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *listener;

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->ip_version = iparams->ip_version;
	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
	listener->port = iparams->port;
	listener->vlan = iparams->vlan;

	listener->event_cb = iparams->event_cb;
	listener->cb_context = iparams->cb_context;
	listener->max_backlog = iparams->max_backlog;
	oparams->handle = listener;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&listener->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
		   listener->event_cb,
		   listener,
		   listener->ip_addr[0],
		   listener->ip_addr[1],
		   listener->ip_addr[2],
		   listener->ip_addr[3], listener->port, listener->vlan);

	return 0;
}

int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
	struct qed_iwarp_listener *listener = handle;
	struct qed_hwfn *p_hwfn = rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&listener->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	kfree(listener);

	return 0;
}
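
/* Post the MPA_OFFLOAD_SEND_RTR ramrod on the EP's QP cid, asking the
 * firmware to transmit the zero-length RTR frame agreed upon during
 * enhanced MPA negotiation.
 */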
int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_iwarp_ep *ep;
	struct qed_rdma_qp *qp;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
		return -EINVAL;
	}

	qp = ep->qp;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   qp->icid, ep->tcp_cid);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);

	return rc;
}

void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}