cq.c

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

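/*
 * Return the CQE at index n only if it is owned by software, i.e. the
 * hardware has completed it: the opcode must not be MLX5_CQE_INVALID and
 * the owner bit must match the parity of the current pass over the ring.
 * For 128-byte CQEs the 64-byte core CQE sits in the second half of the
 * entry, hence the "+ 64" below.
 */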
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE    = 2,
};

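/*
 * Fill in an ib_wc for a receive completion.  The work request id comes
 * either from the SRQ (when the QP is attached to an SRQ or XRC domain)
 * or from the QP's own receive queue; the remaining fields (byte count,
 * immediate data, pkey index, VLAN/SL and RoCE network header type) are
 * decoded from the CQE.
 */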
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid = be16_to_cpu(cqe->slid);
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

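/*
 * Atomic-response fixups: the helpers below would byte-swap the result
 * of an atomic operation in the local buffer, but is_atomic_response()
 * appears to be a placeholder that always returns 0 (note the "TBD"
 * comment), so handle_atomics() currently only advances sq.last_poll.
 */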
static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

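/*
 * When the device is in internal error state the hardware no longer
 * generates completions, so the two helpers below synthesize flush-error
 * completions in software for every outstanding send and receive WQE of
 * a QP; mlx5_ib_poll_sw_comp() walks all QPs attached to the CQ.
 */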
static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}

static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return simulated
	 * (flush-error) completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}

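/*
 * Poll a single hardware CQE and translate it into an ib_wc.  Returns
 * -EAGAIN when no software-owned CQE is available.  Resize, signature
 * error and requester/responder error CQEs are handled here as well;
 * *cur_qp caches the QP of the previous CQE so the QP table lookup is
 * only repeated when the QP number changes.
 */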
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mkey_table.lock);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mkey_table.lock);
		goto repoll;
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

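/*
 * poll_cq verb: drain any software-generated completions first (see
 * mlx5_ib_generate_wc() below), then poll hardware CQEs.  If the device
 * is in internal error state only software flush completions are
 * reported, and the consumer index doorbell is only updated when at
 * least one hardware CQE was consumed.
 */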
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

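/*
 * req_notify_cq verb: record the requested notification type (unless a
 * stronger "next completion" request is already pending) and ring the
 * arm doorbell.  Returns 1 when IB_CQ_REPORT_MISSED_EVENTS is requested
 * and software completions are already queued on wc_list.
 */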
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

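/*
 * User-space CQ creation: copy and validate the user command, pin the
 * user CQ buffer and map the doorbell page, then fill the PAS list and
 * CQ context of the CREATE_CQ command.  CQE compression is only allowed
 * for 64-byte CQEs when the HCA reports the cqe_compression capability,
 * and the requested mini-CQE response format must be a single supported
 * bit (a power of two below MLX5_IB_CQE_RES_RESERVED).
 */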
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;

	ucmdlen = (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		   sizeof(ucmd)) ?
		  (sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	*index = to_mucontext(context)->bfregi.sys_pages[0];

	if (ucmd.cqe_comp_en == 1) {
		if (unlikely((*cqe_size != 64) ||
			     !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		if (unlikely(!ucmd.cqe_comp_res_format ||
			     !(ucmd.cqe_comp_res_format <
			       MLX5_IB_CQE_RES_RESERVED) ||
			     (ucmd.cqe_comp_res_format &
			      (ucmd.cqe_comp_res_format - 1)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
				     ucmd.cqe_comp_res_format);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format,
			 ilog2(ucmd.cqe_comp_res_format));
	}

	return 0;

err_cqb:
	kvfree(*cqb);	/* allocated with mlx5_vzalloc(), may be vmalloc'ed */

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

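/*
 * Kernel CQ creation: allocate the doorbell record and the CQ buffer,
 * mark every CQE as invalid (hardware owned), and fill the PAS list and
 * page-size fields of the CREATE_CQ command from the kernel buffer.
 */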
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_array(&cq->buf.buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

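/*
 * create_cq verb: validate the requested size (entries is rounded up to
 * a power of two with one extra slot, so ibcq.cqe ends up as
 * entries - 1), build the CREATE_CQ command through the user or kernel
 * path above, bind the CQ to a completion EQ, and report the CQ number
 * back to user space when a user context is given.
 */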
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (context)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

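/*
 * Remove all CQEs that belong to the QP or SRQ identified by rsn, e.g.
 * when that QP is moved to RESET or destroyed.  Entries that must stay
 * are copied backwards over the removed ones with their ownership bit
 * preserved, and the consumer index is advanced by the number of
 * entries freed.  Callers are expected to hold cq->lock;
 * mlx5_ib_cq_clean() below is a locked wrapper.
 */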
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

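/*
 * For a kernel CQ resize, copy every software-owned CQE from the old
 * buffer into the new (resize) buffer, fixing up the ownership bit for
 * the new ring size, until the MLX5_CQE_RESIZE_CQ marker CQE is
 * reached.  Called under cq->lock from mlx5_ib_resize_cq().
 */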
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

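/*
 * resize_cq verb: allocate the new buffer (user or kernel), issue
 * MODIFY_CQ with MLX5_CQ_OPMOD_RESIZE, then switch over.  For a user CQ
 * the umem is simply swapped; for a kernel CQ the remaining CQEs are
 * copied via copy_resize_cqes() under cq->lock before the old buffer is
 * freed.
 */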
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}

	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}