cq.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
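
/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR RESET work request to the
 * firmware and wait for its reply, then free the software shadow queue,
 * the DMA-coherent ring, and return the CQID to the resource pool.
 */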
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
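
/*
 * Allocate and initialize a hardware CQ: reserve a CQID, allocate the
 * DMA-coherent ring (plus a software shadow queue for kernel users),
 * post a FW_RI_RES_WR WRITE work request describing the ingress queue to
 * firmware, and look up the BAR2 doorbell address for the queue.
 */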
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			FW_RI_RES_WR_IQESIZE_V(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_va) {
		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
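
/*
 * Synthesize a flushed (T4_ERR_SWFLUSH) receive completion for this QP
 * and push it onto the CQ's software queue.
 */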
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
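
/*
 * Flush the RQ: insert one software flush CQE for every RQ WR still in
 * use, skipping the 'count' entries the caller has already accounted
 * for.  Returns the number of CQEs inserted.
 */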
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);
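
/*
 * Flush the SQ: walk the software SQ from flush_cidx up to pidx, mark
 * each WR as flushed and insert a software flush CQE for it into the
 * send CQ.  Returns the number of WRs flushed.
 */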
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}
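
/*
 * Walk the software SQ starting at flush_cidx and move completed,
 * signaled CQEs that are now in order into the CQ's software queue,
 * skipping over unsignaled WRs.  Stops at the first signaled WR that has
 * not yet completed.
 */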
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}
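
/*
 * Build a synthetic SQ-type READ_REQ completion from a hardware
 * READ_RESP CQE, taking the SQ index and length from the oldest
 * outstanding read WR, so the completion can be reported against the
 * original read request.
 */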
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
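
/*
 * Advance wq->sq.oldest_read to the next outstanding READ_REQ WR in the
 * software SQ, or set it to NULL if no read request remains outstanding.
 */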
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and/or completions that complete
 * prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's an SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}
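
/*
 * Decide whether a CQE corresponds to a work request that still needs to
 * be completed.  TERMINATE CQEs, RQ-side RDMA_WRITE CQEs, SQ-side
 * READ_RESP CQEs and SEND CQEs arriving on an empty RQ do not.
 */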
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}
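
/*
 * Count the CQEs already in the software CQ that complete receive WRs
 * for the given QP.
 */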
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0             CQE returned ok.
 *     -EAGAIN       CQE skipped, try again.
 *     -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup. So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled wrs, then the
		 * delta will be 0.  Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);
	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq) {
		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
			if (t4_sq_empty(wq))
				complete(&qhp->sq_drained);
			if (t4_rq_empty(wq))
				complete(&qhp->rq_drained);
		}
		spin_unlock(&qhp->lock);
	}
	return ret;
}
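
/*
 * Poll up to num_entries work completions from the CQ, retrying past any
 * CQEs that poll_cq() asks us to skip (-EAGAIN).
 */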
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}
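
/*
 * Destroy an ib_cq: drop the idr handle, wait for the reference count to
 * reach zero, then tear down the hardware CQ and free the c4iw_cq.
 */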
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}
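
/*
 * Create an ib_cq: size the hardware queue (status page, full/empty
 * slack, 16-entry alignment, 2x depth, 64-entry minimum), create the
 * hardware CQ, and for user contexts hand back mmap keys for the queue
 * memory and the BAR2/GTS doorbell page via udata.
 */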
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}
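
/*
 * Arm the CQ for completion notification.  When the caller does not pass
 * IB_CQ_REPORT_MISSED_EVENTS, any nonzero indication returned by
 * t4_arm_cq() is suppressed and 0 is returned instead.
 */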
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}