  1. /*
  2. * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of EITHER the GNU General Public License
  6. * version 2 as published by the Free Software Foundation or the BSD
  7. * 2-Clause License. This program is distributed in the hope that it
  8. * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
  9. * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
  10. * See the GNU General Public License version 2 for more details at
  11. * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
  12. *
  13. * You should have received a copy of the GNU General Public License
  14. * along with this program available in the file COPYING in the main
  15. * directory of this source tree.
  16. *
  17. * The BSD 2-Clause License
  18. *
  19. * Redistribution and use in source and binary forms, with or
  20. * without modification, are permitted provided that the following
  21. * conditions are met:
  22. *
  23. * - Redistributions of source code must retain the above
  24. * copyright notice, this list of conditions and the following
  25. * disclaimer.
  26. *
  27. * - Redistributions in binary form must reproduce the above
  28. * copyright notice, this list of conditions and the following
  29. * disclaimer in the documentation and/or other materials
  30. * provided with the distribution.
  31. *
  32. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  33. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  34. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  35. * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  36. * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
  37. * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  38. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  39. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  40. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  41. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  42. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  43. * OF THE POSSIBILITY OF SUCH DAMAGE.
  44. */
  45. #include <asm/page.h>
  46. #include <linux/io.h>
  47. #include <linux/wait.h>
  48. #include <rdma/ib_addr.h>
  49. #include <rdma/ib_smi.h>
  50. #include <rdma/ib_user_verbs.h>
  51. #include "pvrdma.h"
  52. /**
  53. * pvrdma_req_notify_cq - request notification for a completion queue
  54. * @ibcq: the completion queue
  55. * @notify_flags: notification flags
  56. *
  57. * @return: 0 for success.
  58. */
  59. int pvrdma_req_notify_cq(struct ib_cq *ibcq,
  60. enum ib_cq_notify_flags notify_flags)
  61. {
  62. struct pvrdma_dev *dev = to_vdev(ibcq->device);
  63. struct pvrdma_cq *cq = to_vcq(ibcq);
  64. u32 val = cq->cq_handle;
  65. val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
  66. PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
  67. pvrdma_write_uar_cq(dev, val);
  68. return 0;
  69. }
  70. /**
  71. * pvrdma_create_cq - create completion queue
  72. * @ibdev: the device
  73. * @attr: completion queue attributes
  74. * @context: user context
  75. * @udata: user data
  76. *
  77. * @return: ib_cq completion queue pointer on success,
  78. * otherwise returns negative errno.
  79. */
  80. struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
  81. const struct ib_cq_init_attr *attr,
  82. struct ib_ucontext *context,
  83. struct ib_udata *udata)
  84. {
  85. int entries = attr->cqe;
  86. struct pvrdma_dev *dev = to_vdev(ibdev);
  87. struct pvrdma_cq *cq;
  88. int ret;
  89. int npages;
  90. unsigned long flags;
  91. union pvrdma_cmd_req req;
  92. union pvrdma_cmd_resp rsp;
  93. struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
  94. struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
  95. struct pvrdma_create_cq ucmd;
  96. BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
  97. entries = roundup_pow_of_two(entries);
  98. if (entries < 1 || entries > dev->dsr->caps.max_cqe)
  99. return ERR_PTR(-EINVAL);
  100. if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
  101. return ERR_PTR(-ENOMEM);
  102. cq = kzalloc(sizeof(*cq), GFP_KERNEL);
  103. if (!cq) {
  104. atomic_dec(&dev->num_cqs);
  105. return ERR_PTR(-ENOMEM);
  106. }
  107. cq->ibcq.cqe = entries;
  108. if (context) {
  109. if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
  110. ret = -EFAULT;
  111. goto err_cq;
  112. }
  113. cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
  114. IB_ACCESS_LOCAL_WRITE, 1);
  115. if (IS_ERR(cq->umem)) {
  116. ret = PTR_ERR(cq->umem);
  117. goto err_cq;
  118. }
  119. npages = ib_umem_page_count(cq->umem);
  120. } else {
  121. cq->is_kernel = true;
  122. /* One extra page for shared ring state */
  123. npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
  124. PAGE_SIZE - 1) / PAGE_SIZE;
  125. /* Skip header page. */
  126. cq->offset = PAGE_SIZE;
  127. }
  128. if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
  129. dev_warn(&dev->pdev->dev,
  130. "overflow pages in completion queue\n");
  131. ret = -EINVAL;
  132. goto err_umem;
  133. }
  134. ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
  135. if (ret) {
  136. dev_warn(&dev->pdev->dev,
  137. "could not allocate page directory\n");
  138. goto err_umem;
  139. }
  140. /* Ring state is always the first page. Set in library for user cq. */
  141. if (cq->is_kernel)
  142. cq->ring_state = cq->pdir.pages[0];
  143. else
  144. pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
  145. atomic_set(&cq->refcnt, 1);
  146. init_waitqueue_head(&cq->wait);
  147. spin_lock_init(&cq->cq_lock);
  148. memset(cmd, 0, sizeof(*cmd));
  149. cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
  150. cmd->nchunks = npages;
  151. cmd->ctx_handle = (context) ?
  152. (u64)to_vucontext(context)->ctx_handle : 0;
  153. cmd->cqe = entries;
  154. cmd->pdir_dma = cq->pdir.dir_dma;
  155. ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
  156. if (ret < 0) {
  157. dev_warn(&dev->pdev->dev,
  158. "could not create completion queue, error: %d\n", ret);
  159. goto err_page_dir;
  160. }
  161. cq->ibcq.cqe = resp->cqe;
  162. cq->cq_handle = resp->cq_handle;
  163. spin_lock_irqsave(&dev->cq_tbl_lock, flags);
  164. dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
  165. spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
  166. if (context) {
  167. cq->uar = &(to_vucontext(context)->uar);
  168. /* Copy udata back. */
  169. if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
  170. dev_warn(&dev->pdev->dev,
  171. "failed to copy back udata\n");
  172. pvrdma_destroy_cq(&cq->ibcq);
  173. return ERR_PTR(-EINVAL);
  174. }
  175. }
  176. return &cq->ibcq;
  177. err_page_dir:
  178. pvrdma_page_dir_cleanup(dev, &cq->pdir);
  179. err_umem:
  180. if (context)
  181. ib_umem_release(cq->umem);
  182. err_cq:
  183. atomic_dec(&dev->num_cqs);
  184. kfree(cq);
  185. return ERR_PTR(ret);
  186. }
/*
 * pvrdma_free_cq - release all resources owned by a completion queue.
 * @dev: the device the CQ belongs to.
 * @cq: the CQ to free.
 *
 * Drops the reference taken at creation time, then blocks until every
 * remaining reference holder has dropped theirs before tearing the CQ
 * down.  NOTE(review): assumes no locks are held by the caller, since
 * wait_event() may sleep — confirm at call sites.
 */
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
	/* Drop the creation reference and wait for all others to go away. */
	atomic_dec(&cq->refcnt);
	wait_event(cq->wait, !atomic_read(&cq->refcnt));

	/* User CQs pinned user memory for the ring; release it. */
	if (!cq->is_kernel)
		ib_umem_release(cq->umem);

	pvrdma_page_dir_cleanup(dev, &cq->pdir);
	kfree(cq);
}
  196. /**
  197. * pvrdma_destroy_cq - destroy completion queue
  198. * @cq: the completion queue to destroy.
  199. *
  200. * @return: 0 for success.
  201. */
  202. int pvrdma_destroy_cq(struct ib_cq *cq)
  203. {
  204. struct pvrdma_cq *vcq = to_vcq(cq);
  205. union pvrdma_cmd_req req;
  206. struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
  207. struct pvrdma_dev *dev = to_vdev(cq->device);
  208. unsigned long flags;
  209. int ret;
  210. memset(cmd, 0, sizeof(*cmd));
  211. cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
  212. cmd->cq_handle = vcq->cq_handle;
  213. ret = pvrdma_cmd_post(dev, &req, NULL, 0);
  214. if (ret < 0)
  215. dev_warn(&dev->pdev->dev,
  216. "could not destroy completion queue, error: %d\n",
  217. ret);
  218. /* free cq's resources */
  219. spin_lock_irqsave(&dev->cq_tbl_lock, flags);
  220. dev->cq_tbl[vcq->cq_handle] = NULL;
  221. spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
  222. pvrdma_free_cq(dev, vcq);
  223. atomic_dec(&dev->num_cqs);
  224. return ret;
  225. }
/**
 * pvrdma_modify_cq - modify the CQ moderation parameters
 * @cq: the CQ to modify
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 *
 * @return: -EOPNOTSUPP as CQ moderation is not supported.
 */
int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}
  238. static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
  239. {
  240. return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
  241. &cq->pdir,
  242. cq->offset +
  243. sizeof(struct pvrdma_cqe) * i);
  244. }
/*
 * _pvrdma_flush_cqe - discard a QP's completions from a kernel CQ ring.
 * @qp: the QP whose completions should be removed.
 * @cq: the CQ to scrub.
 *
 * Walks the occupied portion of the CQE ring backwards from the producer
 * tail and compacts it in place: entries belonging to other QPs are kept
 * (slid toward the tail), while entries whose low 16 handle bits match
 * @qp are dropped by advancing the consumer head.
 *
 * NOTE(review): the "Lock held" comment implies the caller must hold
 * cq->cq_lock — confirm at call sites.
 */
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
	int head;
	int has_data;

	/* Nothing to do for user CQs; their ring lives in user space. */
	if (!cq->is_kernel)
		return;

	/* Lock held */
	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
					    cq->ibcq.cqe, &head);
	if (unlikely(has_data > 0)) {
		int items;
		int curr;
		int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
				      cq->ibcq.cqe);
		struct pvrdma_cqe *cqe;
		struct pvrdma_cqe *curr_cqe;

		/* Number of CQEs currently in the ring (handles wrap). */
		items = (tail > head) ? (tail - head) :
			(cq->ibcq.cqe - head + tail);

		/* Scan from the newest entry back toward the head. */
		curr = --tail;
		while (items-- > 0) {
			/* Wrap both cursors at the ring boundary. */
			if (curr < 0)
				curr = cq->ibcq.cqe - 1;
			if (tail < 0)
				tail = cq->ibcq.cqe - 1;
			curr_cqe = get_cqe(cq, curr);

			/* Low 16 bits of cqe->qp hold the QP handle. */
			if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
				/* Keep: slide the entry into the tail slot. */
				if (curr != tail) {
					cqe = get_cqe(cq, tail);
					*cqe = *curr_cqe;
				}
				tail--;
			} else {
				/* Drop: consume one slot off the ring. */
				pvrdma_idx_ring_inc(
					&cq->ring_state->rx.cons_head,
					cq->ibcq.cqe);
			}
			curr--;
		}
	}
}
/*
 * pvrdma_poll_one - pull one completion off the CQ ring into @wc.
 * @cq: the completion queue being polled.
 * @cur_qp: out parameter set to the QP the polled CQE belongs to.
 * @wc: the work completion entry to fill in.
 *
 * Returns 0 on success; -EAGAIN if the ring is still empty after one
 * forced device poll, if the ring state is invalid, or if the CQE
 * references a QP not present in the device's QP table.
 *
 * NOTE(review): callers appear to serialize via cq->cq_lock (see
 * pvrdma_poll_cq) — confirm for any other call sites.
 */
static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
			   struct ib_wc *wc)
{
	struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
	int has_data;
	unsigned int head;
	bool tried = false;
	struct pvrdma_cqe *cqe;

retry:
	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
					    cq->ibcq.cqe, &head);
	if (has_data == 0) {
		if (tried)
			return -EAGAIN;

		/* Ask the device to refresh ring state, then retry once. */
		pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
		tried = true;
		goto retry;
	} else if (has_data == PVRDMA_INVALID_IDX) {
		dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
		return -EAGAIN;
	}

	cqe = get_cqe(cq, head);

	/* Ensure cqe is valid. */
	rmb();

	/* Low 16 bits of cqe->qp index the device's QP table. */
	if (dev->qp_tbl[cqe->qp & 0xffff])
		*cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
	else
		return -EAGAIN;

	/* Translate the device CQE into the ib_wc the caller expects. */
	wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
	wc->status = pvrdma_wc_status_to_ib(cqe->status);
	wc->wr_id = cqe->wr_id;
	wc->qp = &(*cur_qp)->ibqp;
	wc->byte_len = cqe->byte_len;
	wc->ex.imm_data = cqe->imm_data;
	wc->src_qp = cqe->src_qp;
	wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->slid;
	wc->sl = cqe->sl;
	wc->dlid_path_bits = cqe->dlid_path_bits;
	wc->port_num = cqe->port_num;
	wc->vendor_err = cqe->vendor_err;

	/* Update shared ring state */
	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);

	return 0;
}
  331. /**
  332. * pvrdma_poll_cq - poll for work completion queue entries
  333. * @ibcq: completion queue
  334. * @num_entries: the maximum number of entries
  335. * @entry: pointer to work completion array
  336. *
  337. * @return: number of polled completion entries
  338. */
  339. int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
  340. {
  341. struct pvrdma_cq *cq = to_vcq(ibcq);
  342. struct pvrdma_qp *cur_qp = NULL;
  343. unsigned long flags;
  344. int npolled;
  345. if (num_entries < 1 || wc == NULL)
  346. return 0;
  347. spin_lock_irqsave(&cq->cq_lock, flags);
  348. for (npolled = 0; npolled < num_entries; ++npolled) {
  349. if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
  350. break;
  351. }
  352. spin_unlock_irqrestore(&cq->cq_lock, flags);
  353. /* Ensure we do not return errors from poll_cq */
  354. return npolled;
  355. }
/**
 * pvrdma_resize_cq - resize CQ
 * @ibcq: the completion queue
 * @entries: CQ entries
 * @udata: user data
 *
 * @return: -EOPNOTSUPP as CQ resize is not supported.
 */
int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	return -EOPNOTSUPP;
}