/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA (server-side).
 */

#include <linux/sunrpc/svc_rdma.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

#undef SVCRDMA_BACKCHANNEL_DEBUG
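
/* Handle an RPC reply arriving on the backchannel: look up the
 * rpc_rqst that matches the reply's XID, copy the RPC message into
 * that request's receive buffer, adjust the transport's congestion
 * window to the credit value granted by the peer, and complete the
 * task waiting on the reply.
 *
 * Returns zero on success; otherwise -EAGAIN is returned and the
 * reply is dropped.
 */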
int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp,
			     struct xdr_buf *rcvbuf)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct kvec *dst, *src = &rcvbuf->head[0];
	struct rpc_rqst *req;
	unsigned long cwnd;
	u32 credits;
	size_t len;
	__be32 xid;
	__be32 *p;
	int ret;

	p = (__be32 *)src->iov_base;
	len = src->iov_len;
	xid = rmsgp->rm_xid;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: xid=%08x, length=%zu\n",
		__func__, be32_to_cpu(xid), len);
	pr_info("%s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, rmsgp);
	pr_info("%s:      RPC: %*ph\n",
		__func__, (int)len, p);
#endif

	ret = -EAGAIN;
	if (src->iov_len < 24)
		goto out_shortreply;

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_notfound;

	dst = &req->rq_private_buf.head[0];
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	if (dst->iov_len < len)
		goto out_unlock;
	memcpy(dst->iov_base, p, len);

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
		credits = r_xprt->rx_buf.rb_bc_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(req->rq_task);

	ret = 0;
	xprt_complete_rqst(req->rq_task, rcvbuf->len);
	rcvbuf->len = 0;

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
out:
	return ret;

out_shortreply:
	dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
		xprt, src->iov_len);
	goto out;

out_notfound:
	dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
		xprt, be32_to_cpu(xid));
	goto out_unlock;
}

/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_reply, but takes an rpc_rqst
 * instead, does not support chunks, and avoids blocking memory
 * allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
			      struct rpc_rqst *rqst)
{
	struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;
	struct ib_send_wr send_wr;
	int ret;

	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, sndbuf, vec);
	if (ret)
		goto out_err;

	/* Post a recv buffer to handle the reply for this request. */
	ret = svc_rdma_post_recv(rdma, GFP_NOIO);
	if (ret) {
		pr_err("svcrdma: Failed to post bc receive buffer, err=%d.\n",
		       ret);
		pr_err("svcrdma: closing transport %p.\n", rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		ret = -ENOTCONN;
		goto out_err;
	}

	/* The marshaled request fits in the single page allocated by
	 * xprt_rdma_bc_allocate(), so one SGE covers the whole Send.
	 */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
	ctxt->count = 1;

	ctxt->wr_op = IB_WR_SEND;
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = sndbuf->len;
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
				sndbuf->len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
		ret = -EIO;
		goto out_unmap;
	}
	atomic_inc(&rdma->sc_dma_used);

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret) {
		ret = -EIO;
		goto out_unmap;
	}

out_err:
	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: %s returns %d\n", __func__, ret);
	return ret;

out_unmap:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	goto out_err;
}

/* Server-side transport endpoint wants a whole page for its send
 * buffer. The client RPC code constructs the RPC header in this
 * buffer before it invokes ->send_request.
 *
 * Returns NULL if there was a temporary allocation failure.
 */
static void *
xprt_rdma_bc_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
	struct svcxprt_rdma *rdma;
	struct page *page;

	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);

	/* Prevent an infinite loop: try to make this case work */
	if (size > PAGE_SIZE)
		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
			  size);

	page = alloc_page(RPCRDMA_DEF_GFP);
	if (!page)
		return NULL;

	return page_address(page);
}

static void
xprt_rdma_bc_free(void *buffer)
{
	/* No-op: ctxt and page have already been freed. */
}
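
/* Marshal the RPC/RDMA transport header into the space reserved at
 * the head of the send buffer, then hand the request to
 * svc_rdma_bc_sendto(). All three chunk lists are marked empty:
 * backchannel calls are always sent inline.
 */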
static int
rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)rqst->rq_buffer;
	int rc;

	/* Space in the send buffer for an RPC/RDMA header is
	 * reserved via xprt->tsh_size.
	 */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif

	rc = svc_rdma_bc_sendto(rdma, rqst);
	if (rc)
		goto drop_connection;
	return rc;

drop_connection:
	dprintk("svcrdma: failed to send bc call\n");
	xprt_disconnect_done(xprt);
	return -ENOTCONN;
}

/* Send an RPC call on the passive end of a transport
 * connection.
 */
static int
xprt_rdma_bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
	struct svcxprt_rdma *rdma;
	int ret;

	dprintk("svcrdma: sending bc call with xid: %08x\n",
		be32_to_cpu(rqst->rq_xid));

	if (!mutex_trylock(&sxprt->xpt_mutex)) {
		/* Queue the task, then try the mutex once more in
		 * case it was released before the task was queued.
		 */
		rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&sxprt->xpt_mutex))
			return -EAGAIN;
		/* Got the mutex after all; don't leave this task
		 * sleeping on the queue.
		 */
		rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
	}

	ret = -ENOTCONN;
	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
	if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
		ret = rpcrdma_bc_send_request(rdma, rqst);

	mutex_unlock(&sxprt->xpt_mutex);

	if (ret < 0)
		return ret;
	return 0;
}
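
/* The backchannel shares its connection with the forward channel;
 * connection teardown is owned by the server-side svc transport, so
 * there is nothing for the client-side transport to do here.
 */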
static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
}
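
/* Release the rpc_xprt allocated in xprt_setup_rdma_bc() and drop
 * the module reference taken there.
 */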
static void
xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);

	xprt_free(xprt);
	module_put(THIS_MODULE);
}
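
/* Backchannel sends are flow-controlled by RPC/RDMA credits, hence
 * the congestion-controlled variants of the reserve, release, and
 * release_request methods.
 */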
static struct rpc_xprt_ops xprt_rdma_bc_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,
	.buf_alloc		= xprt_rdma_bc_allocate,
	.buf_free		= xprt_rdma_bc_free,
	.send_request		= xprt_rdma_bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xprt_rdma_bc_close,
	.destroy		= xprt_rdma_bc_put,
	.print_stats		= xprt_rdma_print_stats
};

static const struct rpc_timeout xprt_rdma_bc_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/* It shouldn't matter if the number of backchannel session slots
 * doesn't match the number of RPC/RDMA credits. That just means
 * one or the other will have extra slots that aren't used.
 */
static struct rpc_xprt *
xprt_setup_rdma_bc(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC:       %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new_xprt),
			  RPCRDMA_MAX_BC_REQUESTS,
			  RPCRDMA_MAX_BC_REQUESTS);
	if (!xprt) {
		dprintk("RPC:       %s: couldn't allocate rpc_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_bc_timeout;
	xprt_set_bound(xprt);
	xprt_set_connected(xprt);
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
	xprt->tsh_size = RPCRDMA_HDRLEN_MIN / sizeof(__be32);
	xprt->ops = &xprt_rdma_bc_procs;

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	xprt_rdma_format_addresses(xprt, (struct sockaddr *)&xprt->addr);
	xprt->resvport = 0;

	xprt->max_payload = xprt_rdma_max_inline_read;

	new_xprt = rpcx_to_rdmax(xprt);
	new_xprt->rx_buf.rb_bc_max_requests = xprt->max_reqs;

	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;

	if (!try_module_get(THIS_MODULE))
		goto out_fail;

	/* Final put for backchannel xprt is in __svc_rdma_free */
	xprt_get(xprt);
	return xprt;

out_fail:
	xprt_rdma_free_addresses(xprt);
	args->bc_xprt->xpt_bc_xprt = NULL;
	xprt_put(xprt);
	xprt_free(xprt);
	return ERR_PTR(-EINVAL);
}

struct xprt_class xprt_rdma_bc = {
	.list		= LIST_HEAD_INIT(xprt_rdma_bc.list),
	.name		= "rdma backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_RDMA,
	.setup		= xprt_setup_rdma_bc,
};