svc_rdma_backchannel.c

/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA (server-side).
 */

#include <linux/sunrpc/svc_rdma.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

#undef SVCRDMA_BACKCHANNEL_DEBUG
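
/* Handle a reply that arrived on the backward direction of a
 * server-side RPC/RDMA transport: match it to the waiting rpc_rqst
 * by XID, copy the RPC message into that request's receive buffer,
 * and refresh the congestion window from the credit value in the
 * reply's RPC/RDMA header.
 *
 * Returns zero if the reply was consumed; -EAGAIN if it was too
 * short, matched no outstanding request, or did not fit in the
 * request's reply buffer.
 */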
int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp,
			     struct xdr_buf *rcvbuf)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct kvec *dst, *src = &rcvbuf->head[0];
	struct rpc_rqst *req;
	unsigned long cwnd;
	u32 credits;
	size_t len;
	__be32 xid;
	__be32 *p;
	int ret;

	p = (__be32 *)src->iov_base;
	len = src->iov_len;
	xid = rmsgp->rm_xid;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: xid=%08x, length=%zu\n",
		__func__, be32_to_cpu(xid), len);
	pr_info("%s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, rmsgp);
	pr_info("%s:      RPC: %*ph\n",
		__func__, (int)len, p);
#endif

	ret = -EAGAIN;
	if (src->iov_len < 24)
		goto out_shortreply;

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_notfound;

	dst = &req->rq_private_buf.head[0];
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	if (dst->iov_len < len)
		goto out_unlock;
	memcpy(dst->iov_base, p, len);
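
	/* The credit value in the reply grants send credits for
	 * further backchannel calls. Clamp it to at least one so
	 * the backchannel cannot deadlock, and to the configured
	 * maximum, then convert it into a congestion window for
	 * the backchannel's rpc_xprt.
	 */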
	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
		credits = r_xprt->rx_buf.rb_bc_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(req->rq_task);

	ret = 0;
	xprt_complete_rqst(req->rq_task, rcvbuf->len);
	rcvbuf->len = 0;

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
out:
	return ret;

out_shortreply:
	dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
		xprt, src->iov_len);
	goto out;

out_notfound:
	dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
		xprt, be32_to_cpu(xid));
	goto out_unlock;
}

/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_reply, but takes an rpc_rqst
 * instead, does not support chunks, and avoids blocking memory
 * allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
			      struct rpc_rqst *rqst)
{
	struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;
	struct ib_send_wr send_wr;
	int ret;

	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
	if (ret)
		goto out_err;

	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
	if (ret)
		goto out_err;
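
	/* The entire marshaled message sits in the first page of
	 * the send buffer; map that page for DMA and describe it
	 * with a single SGE. The Send WR is signaled so that
	 * svc_rdma_wc_send can unmap and release the context once
	 * the Send completes.
	 */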
	ctxt = svc_rdma_get_context(rdma);
	ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
	ctxt->count = 1;

	ctxt->direction = DMA_TO_DEVICE;
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = sndbuf->len;
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
			    sndbuf->len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
		ret = -EIO;
		goto out_unmap;
	}
	svc_rdma_count_mappings(rdma, ctxt);

	memset(&send_wr, 0, sizeof(send_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret) {
		ret = -EIO;
		goto out_unmap;
	}
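
	/* Note that out_err is also the success exit: the request
	 * map is returned in every case, and ret is zero when the
	 * Send was posted successfully.
	 */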
out_err:
	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: %s returns %d\n", __func__, ret);
	return ret;

out_unmap:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	goto out_err;
}

/* Server-side transport endpoint wants a whole page for its send
 * buffer. The client RPC code constructs the RPC header in this
 * buffer before it invokes ->send_request.
 */
static int
xprt_rdma_bc_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize;
	struct page *page;

	if (size > PAGE_SIZE) {
		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
			  size);
		return -EINVAL;
	}

	/* svc_rdma_sendto releases this page */
	page = alloc_page(RPCRDMA_DEF_GFP);
	if (!page)
		return -ENOMEM;
	rqst->rq_buffer = page_address(page);

	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
	if (!rqst->rq_rbuffer) {
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}
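
/* The send page allocated in ->buf_alloc is released by
 * svc_rdma_sendto (see the comment in xprt_rdma_bc_allocate);
 * only the receive buffer allocated there is freed here.
 */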
static void
xprt_rdma_bc_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;

	kfree(rqst->rq_rbuffer);
}
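
/* Marshal and send one backchannel call. The RPC/RDMA header is
 * built directly in the send buffer: a header advertising three
 * empty chunk lists precedes the RPC message itself. If the Send
 * cannot be posted, the transport is marked disconnected so the
 * caller can detect the failure.
 */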
static int
rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)rqst->rq_buffer;
	int rc;

	/* Space in the send buffer for an RPC/RDMA header is reserved
	 * via xprt->tsh_size.
	 */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif

	rc = svc_rdma_bc_sendto(rdma, rqst);
	if (rc)
		goto drop_connection;
	return rc;

drop_connection:
	dprintk("svcrdma: failed to send bc call\n");
	xprt_disconnect_done(xprt);
	return -ENOTCONN;
}

/* Send an RPC call on the passive end of a transport
 * connection.
 */
static int
xprt_rdma_bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
	struct svcxprt_rdma *rdma;
	int ret;

	dprintk("svcrdma: sending bc call with xid: %08x\n",
		be32_to_cpu(rqst->rq_xid));
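
	/* Serialize with other senders without blocking in the RPC
	 * scheduler: if the transport mutex is busy, park the task
	 * on xpt_bc_pending and try once more; if the second try
	 * succeeds after all, take the task back off the wait queue.
	 */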
	if (!mutex_trylock(&sxprt->xpt_mutex)) {
		rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&sxprt->xpt_mutex))
			return -EAGAIN;
		rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
	}

	ret = -ENOTCONN;
	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
	if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
		ret = rpcrdma_bc_send_request(rdma, rqst);

	mutex_unlock(&sxprt->xpt_mutex);

	if (ret < 0)
		return ret;
	return 0;
}

static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
}

static void
xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);

	xprt_free(xprt);
	module_put(THIS_MODULE);
}

static struct rpc_xprt_ops xprt_rdma_bc_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,
	.buf_alloc		= xprt_rdma_bc_allocate,
	.buf_free		= xprt_rdma_bc_free,
	.send_request		= xprt_rdma_bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xprt_rdma_bc_close,
	.destroy		= xprt_rdma_bc_put,
	.print_stats		= xprt_rdma_print_stats
};

static const struct rpc_timeout xprt_rdma_bc_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/* It shouldn't matter if the number of backchannel session slots
 * doesn't match the number of RPC/RDMA credits. That just means
 * one or the other will have extra slots that aren't used.
 */
static struct rpc_xprt *
xprt_setup_rdma_bc(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC:       %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new_xprt),
			  RPCRDMA_MAX_BC_REQUESTS,
			  RPCRDMA_MAX_BC_REQUESTS);
	if (!xprt) {
		dprintk("RPC:       %s: couldn't allocate rpc_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_bc_timeout;
	xprt_set_bound(xprt);
	xprt_set_connected(xprt);
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
	xprt->tsh_size = RPCRDMA_HDRLEN_MIN / sizeof(__be32);
	xprt->ops = &xprt_rdma_bc_procs;

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	xprt_rdma_format_addresses(xprt, (struct sockaddr *)&xprt->addr);
	xprt->resvport = 0;

	xprt->max_payload = xprt_rdma_max_inline_read;

	new_xprt = rpcx_to_rdmax(xprt);
	new_xprt->rx_buf.rb_bc_max_requests = xprt->max_reqs;

	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;

	if (!try_module_get(THIS_MODULE))
		goto out_fail;

	/* Final put for backchannel xprt is in __svc_rdma_free */
	xprt_get(xprt);
	return xprt;

out_fail:
	xprt_rdma_free_addresses(xprt);
	args->bc_xprt->xpt_bc_xprt = NULL;
	args->bc_xprt->xpt_bc_xps = NULL;
	xprt_put(xprt);
	xprt_free(xprt);
	return ERR_PTR(-EINVAL);
}
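
/* The transport class for server-side backchannel RPC/RDMA. Its
 * ident matches the XPRT_TRANSPORT_BC_RDMA protocol value assigned
 * to each backchannel xprt in xprt_setup_rdma_bc above.
 */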
struct xprt_class xprt_rdma_bc = {
	.list			= LIST_HEAD_INIT(xprt_rdma_bc.list),
	.name			= "rdma backchannel",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_BC_RDMA,
	.setup			= xprt_setup_rdma_bc,
};