cq.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

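/*
 * Post a FW_RI_RES_WR with the RESET opcode to tear down the hardware CQ,
 * wait for the firmware reply, then free the software queue, the
 * DMA-coherent queue memory and the CQID.
 */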
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

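/*
 * Allocate a CQID, the software queue (kernel CQs only) and the
 * DMA-coherent hardware queue, then post a FW_RI_RES_WR WRITE to the
 * firmware to create the CQ and wait for its reply.  For user CQs, also
 * compute the bus address of the GTS doorbell page (cq->ugts).
 */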
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
				(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

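/*
 * Insert a software-generated flush completion (T4_ERR_SWFLUSH) into the
 * SW CQ for one pending RQ WR on this work queue.
 */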
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

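/*
 * Insert flush CQEs for every RQ WR still in use, except for the 'count'
 * entries the caller has already accounted for; returns the number of
 * WRs flushed.
 */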
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

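/*
 * Insert a software-generated flush completion (T4_ERR_SWFLUSH) into the
 * SW CQ for the given SQ WR, carrying the WR's opcode and SQ index.
 */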
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

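/*
 * Insert a flush CQE into the SW CQ for every SQ WR from flush_cidx up
 * to pidx that has not been flushed yet, advancing oldest_read past any
 * flushed READ_REQ; returns the number of WRs flushed.
 */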
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}

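/*
 * Walk the SW SQ from flush_cidx and move the CQEs of signaled WRs that
 * have already completed into the SW CQ, stopping at the first signaled
 * WR that has not completed yet; unsignaled WRs are skipped over.
 */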
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

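/*
 * Build a READ REQ completion in local memory from the hardware READ
 * RESP CQE and the oldest outstanding read WR; the hardware CQE does
 * not carry the SQ index, opcode or length of the original read
 * request, so those are taken from the WR.
 */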
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

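/*
 * Advance wq->sq.oldest_read to the next READ_REQ WR still in the SW SQ
 * (between its current position and pidx), or set it to NULL if there
 * is none.
 */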
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}

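/*
 * Return 1 if this CQE completes a WR that the caller should count;
 * return 0 for CQEs that do not consume a WR: TERMINATEs, RQ-side RDMA
 * WRITEs, SQ-side READ RESPs, and SEND completions arriving on an
 * empty RQ.
 */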
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

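/*
 * Count the completions currently sitting in the SW CQ that are RQ
 * completions for this QP and that actually complete a receive WR
 * (per cqe_completes_wr()).
 */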
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0             CQE returned ok.
 *     -EAGAIN       CQE skipped, try again.
 *     -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * if this is not completing any unsignaled wrs, then the
		 * delta will be 0. Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0		cqe returned
 *	-ENODATA	EMPTY;
 *	-EAGAIN		caller must try again
 *	any other	-errno fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}