verbs.c
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * internal functions
 */

static struct workqueue_struct *rpcrdma_receive_wq;

int
rpcrdma_alloc_wq(void)
{
        struct workqueue_struct *recv_wq;

        recv_wq = alloc_workqueue("xprtrdma_receive",
                                  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
                                  0);
        if (!recv_wq)
                return -ENOMEM;

        rpcrdma_receive_wq = recv_wq;
        return 0;
}

void
rpcrdma_destroy_wq(void)
{
        struct workqueue_struct *wq;

        if (rpcrdma_receive_wq) {
                wq = rpcrdma_receive_wq;
                rpcrdma_receive_wq = NULL;
                destroy_workqueue(wq);
        }
}
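
/* Editor's note: a minimal usage sketch, not part of this file. The
 * transport module would typically pair these helpers in its init and
 * exit paths; the function names around the calls below are assumed
 * for illustration only:
 *
 *	static int __init xprt_rdma_init_example(void)
 *	{
 *		int rc = rpcrdma_alloc_wq();
 *		if (rc)
 *			return rc;
 *		// ... register the RPC/RDMA transport ...
 *		return 0;
 *	}
 *
 *	static void __exit xprt_rdma_cleanup_example(void)
 *	{
 *		// ... unregister the transport ...
 *		rpcrdma_destroy_wq();
 *	}
 */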

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        pr_err("RPC: %s: %s on device %s ep %p\n",
               __func__, ib_event_msg(event->event),
               event->device->name, context);
        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
        }
}

static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        pr_err("RPC: %s: %s on device %s ep %p\n",
               __func__, ib_event_msg(event->event),
               event->device->name, context);
        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
        }
}

static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
        /* WARNING: Only wr_id and status are reliable at this point */
        if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
                if (wc->status != IB_WC_SUCCESS &&
                    wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("RPC: %s: SEND: %s\n",
                               __func__, ib_wc_status_msg(wc->status));
        } else {
                struct rpcrdma_mw *r;

                r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
                r->mw_sendcompletion(wc);
        }
}

/* The common case is that a single send completion is waiting. By
 * passing two WC entries to ib_poll_cq, a return code of 1
 * means there is exactly one WC waiting and no more. We don't
 * have to invoke ib_poll_cq again to know that the CQ has been
 * properly drained.
 */
static void
rpcrdma_sendcq_poll(struct ib_cq *cq)
{
        struct ib_wc *pos, wcs[2];
        int count, rc;

        do {
                pos = wcs;

                rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos);
                if (rc < 0)
                        break;

                count = rc;
                while (count-- > 0)
                        rpcrdma_sendcq_process_wc(pos++);
        } while (rc == ARRAY_SIZE(wcs));
        return;
}
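
/* Editor's note on the completion upcalls below: ib_req_notify_cq()
 * is called with IB_CQ_REPORT_MISSED_EVENTS, which returns a positive
 * value when completions may have arrived between the final poll and
 * re-arming the CQ. Looping until it returns zero ensures no
 * completion is left behind without an upcall.
 */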

/* Handle provider send completion upcalls.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
        do {
                rpcrdma_sendcq_poll(cq);
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}

static void
rpcrdma_receive_worker(struct work_struct *work)
{
        struct rpcrdma_rep *rep =
                        container_of(work, struct rpcrdma_rep, rr_work);

        rpcrdma_reply_handler(rep);
}

static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc)
{
        struct rpcrdma_rep *rep =
                        (struct rpcrdma_rep *)(unsigned long)wc->wr_id;

        /* WARNING: Only wr_id and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS)
                goto out_fail;

        /* status == SUCCESS means all fields in wc are trustworthy */
        if (wc->opcode != IB_WC_RECV)
                return;

        dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
                __func__, rep, wc->byte_len);

        rep->rr_len = wc->byte_len;
        ib_dma_sync_single_for_cpu(rep->rr_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rep->rr_len, DMA_FROM_DEVICE);
        prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
        queue_work(rpcrdma_receive_wq, &rep->rr_work);
        return;

out_fail:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("RPC: %s: rep %p: %s\n",
                       __func__, rep, ib_wc_status_msg(wc->status));
        rep->rr_len = RPCRDMA_BAD_LEN;
        goto out_schedule;
}

/* The wc array is on stack: automatic memory is always CPU-local.
 *
 * struct ib_wc is 64 bytes, making the poll array potentially
 * large. But this is at the bottom of the call chain. Further
 * substantial work is done in another thread.
 */
static void
rpcrdma_recvcq_poll(struct ib_cq *cq)
{
        struct ib_wc *pos, wcs[4];
        int count, rc;

        do {
                pos = wcs;

                rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos);
                if (rc < 0)
                        break;

                count = rc;
                while (count-- > 0)
                        rpcrdma_recvcq_process_wc(pos++);
        } while (rc == ARRAY_SIZE(wcs));
}

/* Handle provider receive completion upcalls.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
        do {
                rpcrdma_recvcq_poll(cq);
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}

static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
        struct ib_wc wc;

        while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
                rpcrdma_recvcq_process_wc(&wc);
        while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
                rpcrdma_sendcq_process_wc(&wc);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rpcrdma_xprt *xprt = id->context;
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
        struct ib_qp_attr *attr = &ia->ri_qp_attr;
        struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
        int connstate = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ia->ri_async_rc = 0;
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ia->ri_async_rc = -EHOSTUNREACH;
                dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
                        __func__, ep);
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ia->ri_async_rc = -ENETUNREACH;
                dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
                        __func__, ep);
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                connstate = 1;
                ib_query_qp(ia->ri_id->qp, attr,
                            IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
                            iattr);
                dprintk("RPC: %s: %d responder resources"
                        " (%d initiator)\n",
                        __func__, attr->max_dest_rd_atomic,
                        attr->max_rd_atomic);
                goto connected;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                connstate = -ENOTCONN;
                goto connected;
        case RDMA_CM_EVENT_UNREACHABLE:
                connstate = -ENETDOWN;
                goto connected;
        case RDMA_CM_EVENT_REJECTED:
                connstate = -ECONNREFUSED;
                goto connected;
        case RDMA_CM_EVENT_DISCONNECTED:
                connstate = -ECONNABORTED;
                goto connected;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                connstate = -ENODEV;
connected:
                dprintk("RPC: %s: %sconnected\n",
                        __func__, connstate > 0 ? "" : "dis");
                ep->rep_connected = connstate;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
                /*FALLTHROUGH*/
        default:
                dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
                        __func__, sap, rpc_get_port(sap), ep,
                        rdma_event_msg(event->event));
                break;
        }

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        if (connstate == 1) {
                int ird = attr->max_dest_rd_atomic;
                int tird = ep->rep_remote_cma.responder_resources;

                pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
                        sap, rpc_get_port(sap),
                        ia->ri_device->name,
                        ia->ri_ops->ro_displayname,
                        xprt->rx_buf.rb_max_requests,
                        ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
        } else if (connstate < 0) {
                pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
                        sap, rpc_get_port(sap), connstate);
        }
#endif

        return 0;
}

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
        if (id) {
                module_put(id->device->owner);
                rdma_destroy_id(id);
        }
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
                  struct rpcrdma_ia *ia, struct sockaddr *addr)
{
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ia->ri_done);

        id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
                            IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                dprintk("RPC: %s: rdma_create_id() failed %i\n",
                        __func__, rc);
                return id;
        }

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
                        __func__, rc);
                goto out;
        }
        wait_for_completion_interruptible_timeout(&ia->ri_done,
                                msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

        /* FIXME:
         * Until xprtrdma supports DEVICE_REMOVAL, the provider must
         * be pinned while there are active NFS/RDMA mounts to prevent
         * hangs and crashes at umount time.
         */
        if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
                dprintk("RPC: %s: Failed to get device module\n",
                        __func__);
                ia->ri_async_rc = -ENODEV;
        }
        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
                        __func__, rc);
                goto put;
        }
        wait_for_completion_interruptible_timeout(&ia->ri_done,
                                msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
        rc = ia->ri_async_rc;
        if (rc)
                goto put;

        return id;

put:
        module_put(id->device->owner);
out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
        struct ib_wc wc;
        int count = 0;

        while (1 == ib_poll_cq(cq, 1, &wc))
                ++count;

        if (count)
                dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
                        __func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection domain.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        int rc;

        ia->ri_dma_mr = NULL;

        ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
        if (IS_ERR(ia->ri_id)) {
                rc = PTR_ERR(ia->ri_id);
                goto out1;
        }
        ia->ri_device = ia->ri_id->device;

        ia->ri_pd = ib_alloc_pd(ia->ri_device);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
                        __func__, rc);
                goto out2;
        }

        if (memreg == RPCRDMA_FRMR) {
                if (!(ia->ri_device->attrs.device_cap_flags &
                      IB_DEVICE_MEM_MGT_EXTENSIONS) ||
                    (ia->ri_device->attrs.max_fast_reg_page_list_len == 0)) {
                        dprintk("RPC: %s: FRMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_MTHCAFMR;
                }
        }
        if (memreg == RPCRDMA_MTHCAFMR) {
                if (!ia->ri_device->alloc_fmr) {
                        dprintk("RPC: %s: MTHCAFMR registration "
                                "not supported by HCA\n", __func__);
                        rc = -EINVAL;
                        goto out3;
                }
        }

        switch (memreg) {
        case RPCRDMA_FRMR:
                ia->ri_ops = &rpcrdma_frwr_memreg_ops;
                break;
        case RPCRDMA_ALLPHYSICAL:
                ia->ri_ops = &rpcrdma_physical_memreg_ops;
                break;
        case RPCRDMA_MTHCAFMR:
                ia->ri_ops = &rpcrdma_fmr_memreg_ops;
                break;
        default:
                printk(KERN_ERR "RPC: Unsupported memory "
                       "registration mode: %d\n", memreg);
                rc = -ENOMEM;
                goto out3;
        }
        dprintk("RPC: %s: memory registration strategy is '%s'\n",
                __func__, ia->ri_ops->ro_displayname);

        rwlock_init(&ia->ri_qplock);
        return 0;

out3:
        ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;
out2:
        rpcrdma_destroy_id(ia->ri_id);
        ia->ri_id = NULL;
out1:
        return rc;
}

/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
        dprintk("RPC: %s: entering\n", __func__);
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rpcrdma_destroy_id(ia->ri_id);
                ia->ri_id = NULL;
        }

        /* If the pd is still busy, xprtrdma missed freeing a resource */
        if (ia->ri_pd && !IS_ERR(ia->ri_pd))
                ib_dealloc_pd(ia->ri_pd);
}
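
/* Editor's note: a minimal setup/teardown sketch, not part of this
 * file, assuming a caller that already holds a struct rpcrdma_xprt
 * and a resolved server sockaddr ("sap" is a placeholder):
 *
 *	rc = rpcrdma_ia_open(xprt, sap, RPCRDMA_FRMR);
 *	if (rc)
 *		return rc;
 *	// ... create the endpoint and buffers, connect, do RPC ...
 *	rpcrdma_ia_close(&xprt->rx_ia);
 */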

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                  struct rpcrdma_create_data_internal *cdata)
{
        struct ib_cq *sendcq, *recvcq;
        struct ib_cq_init_attr cq_attr = {};
        unsigned int max_qp_wr;
        int rc, err;

        if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) {
                dprintk("RPC: %s: insufficient sge's available\n",
                        __func__);
                return -ENOMEM;
        }

        if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
                dprintk("RPC: %s: insufficient wqe's available\n",
                        __func__);
                return -ENOMEM;
        }
        max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS;

        /* check provider's send/recv wr limits */
        if (cdata->max_requests > max_qp_wr)
                cdata->max_requests = max_qp_wr;

        ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
        ep->rep_attr.qp_context = ep;
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_wr = cdata->max_requests;
        ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
        rc = ia->ri_ops->ro_open(ia, ep, cdata);
        if (rc)
                return rc;
        ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
        ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
        ep->rep_attr.cap.max_recv_sge = 1;
        ep->rep_attr.cap.max_inline_data = 0;
        ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->rep_attr.qp_type = IB_QPT_RC;
        ep->rep_attr.port_num = ~0;

        dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->rep_attr.cap.max_send_wr,
                ep->rep_attr.cap.max_recv_wr,
                ep->rep_attr.cap.max_send_sge,
                ep->rep_attr.cap.max_recv_sge);

        /* set trigger for requesting send completion */
        ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
        if (ep->rep_cqinit <= 2)
                ep->rep_cqinit = 0;     /* always signal? */
        INIT_CQCOUNT(ep);
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

        cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
        sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
                              rpcrdma_cq_async_error_upcall, NULL, &cq_attr);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC: %s: failed to create send CQ: %i\n",
                        __func__, rc);
                goto out1;
        }

        rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
        if (rc) {
                dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
                        __func__, rc);
                goto out2;
        }

        cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
        recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
                              rpcrdma_cq_async_error_upcall, NULL, &cq_attr);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC: %s: failed to create recv CQ: %i\n",
                        __func__, rc);
                goto out2;
        }

        rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
        if (rc) {
                dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
                        __func__, rc);
                ib_destroy_cq(recvcq);
                goto out2;
        }

        ep->rep_attr.send_cq = sendcq;
        ep->rep_attr.recv_cq = recvcq;

        /* Initialize cma parameters */

        /* RPC/RDMA does not use private data */
        ep->rep_remote_cma.private_data = NULL;
        ep->rep_remote_cma.private_data_len = 0;

        /* Client offers RDMA Read but does not initiate */
        ep->rep_remote_cma.initiator_depth = 0;
        if (ia->ri_device->attrs.max_qp_rd_atom > 32)   /* arbitrary but <= 255 */
                ep->rep_remote_cma.responder_resources = 32;
        else
                ep->rep_remote_cma.responder_resources =
                        ia->ri_device->attrs.max_qp_rd_atom;

        ep->rep_remote_cma.retry_count = 7;
        ep->rep_remote_cma.flow_control = 0;
        ep->rep_remote_cma.rnr_retry_count = 0;

        return 0;

out2:
        err = ib_destroy_cq(sendcq);
        if (err)
                dprintk("RPC: %s: ib_destroy_cq returned %i\n",
                        __func__, err);
out1:
        if (ia->ri_dma_mr)
                ib_dereg_mr(ia->ri_dma_mr);
        return rc;
}
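
/* Editor's note: rep_cqinit above sets the send-completion signaling
 * cadence. As an illustration only: assuming the caller requested 128
 * credits and ignoring the small RPCRDMA_BACKWARD_WRS reserve,
 * rep_cqinit is about 128/2 - 1 = 63, so roughly one SEND in every 63
 * is posted with IB_SEND_SIGNALED (see rpcrdma_ep_post). The actual
 * numbers depend on the device limits and mount parameters.
 */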

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        dprintk("RPC: %s: entering, connected is %d\n",
                __func__, ep->rep_connected);

        cancel_delayed_work_sync(&ep->rep_connect_worker);

        if (ia->ri_id->qp)
                rpcrdma_ep_disconnect(ep, ia);

        rpcrdma_clean_cq(ep->rep_attr.recv_cq);
        rpcrdma_clean_cq(ep->rep_attr.send_cq);

        if (ia->ri_id->qp) {
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }

        rc = ib_destroy_cq(ep->rep_attr.recv_cq);
        if (rc)
                dprintk("RPC: %s: ib_destroy_cq returned %i\n",
                        __func__, rc);

        rc = ib_destroy_cq(ep->rep_attr.send_cq);
        if (rc)
                dprintk("RPC: %s: ib_destroy_cq returned %i\n",
                        __func__, rc);

        if (ia->ri_dma_mr) {
                rc = ib_dereg_mr(ia->ri_dma_mr);
                dprintk("RPC: %s: ib_dereg_mr returned %i\n",
                        __func__, rc);
        }
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rdma_cm_id *id, *old;
        int rc = 0;
        int retry_count = 0;

        if (ep->rep_connected != 0) {
                struct rpcrdma_xprt *xprt;
retry:
                dprintk("RPC: %s: reconnecting...\n", __func__);

                rpcrdma_ep_disconnect(ep, ia);
                rpcrdma_flush_cqs(ep);

                xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
                id = rpcrdma_create_id(xprt, ia,
                                (struct sockaddr *)&xprt->rx_data.addr);
                if (IS_ERR(id)) {
                        rc = -EHOSTUNREACH;
                        goto out;
                }
                /* TEMP TEMP TEMP - fail if new device:
                 * Deregister/remarshal *all* requests!
                 * Close and recreate adapter, pd, etc!
                 * Re-determine all attributes still sane!
                 * More stuff I haven't thought of!
                 * Rrrgh!
                 */
                if (ia->ri_device != id->device) {
                        printk("RPC: %s: can't reconnect on "
                               "different device!\n", __func__);
                        rpcrdma_destroy_id(id);
                        rc = -ENETUNREACH;
                        goto out;
                }
                /* END TEMP */
                rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC: %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        rpcrdma_destroy_id(id);
                        rc = -ENETUNREACH;
                        goto out;
                }

                write_lock(&ia->ri_qplock);
                old = ia->ri_id;
                ia->ri_id = id;
                write_unlock(&ia->ri_qplock);

                rdma_destroy_qp(old);
                rpcrdma_destroy_id(old);
        } else {
                dprintk("RPC: %s: connecting...\n", __func__);
                rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC: %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        /* do not update ep->rep_connected */
                        return -ENETUNREACH;
                }
        }

        ep->rep_connected = 0;

        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
        if (rc) {
                dprintk("RPC: %s: rdma_connect() failed with %i\n",
                        __func__, rc);
                goto out;
        }

        wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

        /*
         * Check state. A non-peer reject indicates no listener
         * (ECONNREFUSED), which may be a transient state. All
         * others indicate a transport condition which has already
         * undergone a best-effort.
         */
        if (ep->rep_connected == -ECONNREFUSED &&
            ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
                dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
                goto retry;
        }
        if (ep->rep_connected <= 0) {
                /* Sometimes, the only way to reliably connect to remote
                 * CMs is to use the same nonzero values for ORD and IRD. */
                if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
                    (ep->rep_remote_cma.responder_resources == 0 ||
                     ep->rep_remote_cma.initiator_depth !=
                                ep->rep_remote_cma.responder_resources)) {
                        if (ep->rep_remote_cma.responder_resources == 0)
                                ep->rep_remote_cma.responder_resources = 1;
                        ep->rep_remote_cma.initiator_depth =
                                ep->rep_remote_cma.responder_resources;
                        goto retry;
                }
                rc = ep->rep_connected;
        } else {
                struct rpcrdma_xprt *r_xprt;
                unsigned int extras;

                dprintk("RPC: %s: connected\n", __func__);

                r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
                extras = r_xprt->rx_buf.rb_bc_srv_max_requests;

                if (extras) {
                        rc = rpcrdma_ep_post_extra_recv(r_xprt, extras);
                        if (rc) {
                                pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n",
                                        __func__, rc);
                                rc = 0;
                        }
                }
        }

out:
        if (rc)
                ep->rep_connected = rc;
        return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        rpcrdma_flush_cqs(ep);
        rc = rdma_disconnect(ia->ri_id);
        if (!rc) {
                /* returns without wait if not connected */
                wait_event_interruptible(ep->rep_connect_wait,
                                         ep->rep_connected != 1);
                dprintk("RPC: %s: after wait, %sconnected\n", __func__,
                        (ep->rep_connected == 1) ? "still " : "dis");
        } else {
                dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
                ep->rep_connected = rc;
        }
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&req->rl_free);
        spin_lock(&buffer->rb_reqslock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
        spin_unlock(&buffer->rb_reqslock);
        req->rl_buffer = &r_xprt->rx_buf;
        return req;
}

struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_rep *rep;
        int rc;

        rc = -ENOMEM;
        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
                                               GFP_KERNEL);
        if (IS_ERR(rep->rr_rdmabuf)) {
                rc = PTR_ERR(rep->rr_rdmabuf);
                goto out_free;
        }

        rep->rr_device = ia->ri_device;
        rep->rr_rxprt = r_xprt;
        INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
        return rep;

out_free:
        kfree(rep);
out:
        return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        int i, rc;

        buf->rb_max_requests = r_xprt->rx_data.max_requests;
        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_lock);

        rc = ia->ri_ops->ro_init(r_xprt);
        if (rc)
                goto out;

        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
        spin_lock_init(&buf->rb_reqslock);
        for (i = 0; i < buf->rb_max_requests; i++) {
                struct rpcrdma_req *req;

                req = rpcrdma_create_req(r_xprt);
                if (IS_ERR(req)) {
                        dprintk("RPC: %s: request buffer %d alloc"
                                " failed\n", __func__, i);
                        rc = PTR_ERR(req);
                        goto out;
                }
                req->rl_backchannel = false;
                list_add(&req->rl_free, &buf->rb_send_bufs);
        }

        INIT_LIST_HEAD(&buf->rb_recv_bufs);
        for (i = 0; i < buf->rb_max_requests + 2; i++) {
                struct rpcrdma_rep *rep;

                rep = rpcrdma_create_rep(r_xprt);
                if (IS_ERR(rep)) {
                        dprintk("RPC: %s: reply buffer %d alloc failed\n",
                                __func__, i);
                        rc = PTR_ERR(rep);
                        goto out;
                }
                list_add(&rep->rr_list, &buf->rb_recv_bufs);
        }

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_req *req;

        req = list_first_entry(&buf->rb_send_bufs,
                               struct rpcrdma_req, rl_free);
        list_del(&req->rl_free);
        return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_rep *rep;

        rep = list_first_entry(&buf->rb_recv_bufs,
                               struct rpcrdma_rep, rr_list);
        list_del(&rep->rr_list);
        return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
        rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
        kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
        rpcrdma_free_regbuf(ia, req->rl_sendbuf);
        rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
        kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_ia *ia = rdmab_to_ia(buf);

        while (!list_empty(&buf->rb_recv_bufs)) {
                struct rpcrdma_rep *rep;

                rep = rpcrdma_buffer_get_rep_locked(buf);
                rpcrdma_destroy_rep(ia, rep);
        }

        spin_lock(&buf->rb_reqslock);
        while (!list_empty(&buf->rb_allreqs)) {
                struct rpcrdma_req *req;

                req = list_first_entry(&buf->rb_allreqs,
                                       struct rpcrdma_req, rl_all);
                list_del(&req->rl_all);

                spin_unlock(&buf->rb_reqslock);
                rpcrdma_destroy_req(ia, req);
                spin_lock(&buf->rb_reqslock);
        }
        spin_unlock(&buf->rb_reqslock);

        ia->ri_ops->ro_destroy(buf);
}

struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mw *mw = NULL;

        spin_lock(&buf->rb_mwlock);
        if (!list_empty(&buf->rb_mws)) {
                mw = list_first_entry(&buf->rb_mws,
                                      struct rpcrdma_mw, mw_list);
                list_del_init(&mw->mw_list);
        }
        spin_unlock(&buf->rb_mwlock);

        if (!mw)
                pr_err("RPC: %s: no MWs available\n", __func__);
        return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

        spin_lock(&buf->rb_mwlock);
        list_add_tail(&mw->mw_list, &buf->rb_mws);
        spin_unlock(&buf->rb_mwlock);
}
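
/* Editor's note: a hedged sketch of how a registration strategy might
 * borrow and return an MW around a registration operation; the
 * surrounding function and error value are assumed for illustration:
 *
 *	struct rpcrdma_mw *mw = rpcrdma_get_mw(r_xprt);
 *	if (!mw)
 *		return -ENOMEM;
 *	// ... map the segment and register it through mw ...
 *	// ... later, once the registration has been invalidated: ...
 *	rpcrdma_put_mw(r_xprt, mw);
 */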

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;

        spin_lock(&buffers->rb_lock);
        if (list_empty(&buffers->rb_send_bufs))
                goto out_reqbuf;
        req = rpcrdma_buffer_get_req_locked(buffers);
        if (list_empty(&buffers->rb_recv_bufs))
                goto out_repbuf;
        req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
        spin_unlock(&buffers->rb_lock);
        return req;

out_reqbuf:
        spin_unlock(&buffers->rb_lock);
        pr_warn("RPC: %s: out of request buffers\n", __func__);
        return NULL;
out_repbuf:
        spin_unlock(&buffers->rb_lock);
        pr_warn("RPC: %s: out of reply buffers\n", __func__);
        req->rl_reply = NULL;
        return req;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        struct rpcrdma_rep *rep = req->rl_reply;

        req->rl_niovs = 0;
        req->rl_reply = NULL;

        spin_lock(&buffers->rb_lock);
        list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
        if (rep)
                list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
        spin_unlock(&buffers->rb_lock);
}
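
/* Editor's note: an illustrative call-path sketch, not taken from this
 * file. The transport's allocate and free paths are expected to pair
 * these helpers, roughly:
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 *	if (!req)
 *		return -ENOMEM;
 *	// ... marshal the RPC and post it with rpcrdma_ep_post() ...
 *	// ... after the reply has been handled: ...
 *	rpcrdma_buffer_put(req);
 */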

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;

        spin_lock(&buffers->rb_lock);
        if (!list_empty(&buffers->rb_recv_bufs))
                req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
        spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

        spin_lock(&buffers->rb_lock);
        list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
        spin_unlock(&buffers->rb_lock);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */
void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
        dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
                seg->mr_offset,
                (unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
        struct rpcrdma_regbuf *rb;
        struct ib_sge *iov;

        rb = kmalloc(sizeof(*rb) + size, flags);
        if (rb == NULL)
                goto out;

        iov = &rb->rg_iov;
        iov->addr = ib_dma_map_single(ia->ri_device,
                                      (void *)rb->rg_base, size,
                                      DMA_BIDIRECTIONAL);
        if (ib_dma_mapping_error(ia->ri_device, iov->addr))
                goto out_free;

        iov->length = size;
        iov->lkey = ia->ri_pd->local_dma_lkey;
        rb->rg_size = size;
        rb->rg_owner = NULL;
        return rb;

out_free:
        kfree(rb);
out:
        return ERR_PTR(-ENOMEM);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
        struct ib_sge *iov;

        if (!rb)
                return;

        iov = &rb->rg_iov;
        ib_dma_unmap_single(ia->ri_device,
                            iov->addr, iov->length, DMA_BIDIRECTIONAL);
        kfree(rb);
}
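
/* Editor's note: a minimal regbuf lifecycle sketch, with an assumed
 * size and caller, not taken from this file:
 *
 *	struct rpcrdma_regbuf *rb;
 *
 *	rb = rpcrdma_alloc_regbuf(ia, 1024, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	// ... use rb->rg_iov as the SGE for a SEND or RECV work request ...
 *	rpcrdma_free_regbuf(ia, rb);
 */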

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
                struct rpcrdma_ep *ep,
                struct rpcrdma_req *req)
{
        struct ib_device *device = ia->ri_device;
        struct ib_send_wr send_wr, *send_wr_fail;
        struct rpcrdma_rep *rep = req->rl_reply;
        struct ib_sge *iov = req->rl_send_iov;
        int i, rc;

        if (rep) {
                rc = rpcrdma_ep_post_recv(ia, ep, rep);
                if (rc)
                        goto out;
                req->rl_reply = NULL;
        }

        send_wr.next = NULL;
        send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
        send_wr.sg_list = iov;
        send_wr.num_sge = req->rl_niovs;
        send_wr.opcode = IB_WR_SEND;

        for (i = 0; i < send_wr.num_sge; i++)
                ib_dma_sync_single_for_device(device, iov[i].addr,
                                              iov[i].length, DMA_TO_DEVICE);
        dprintk("RPC: %s: posting %d s/g entries\n",
                __func__, send_wr.num_sge);

        if (DECR_CQCOUNT(ep) > 0)
                send_wr.send_flags = 0;
        else { /* Provider must take a send completion every now and then */
                INIT_CQCOUNT(ep);
                send_wr.send_flags = IB_SEND_SIGNALED;
        }

        rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
        if (rc)
                dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
                        rc);
out:
        return rc;
}

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
                     struct rpcrdma_ep *ep,
                     struct rpcrdma_rep *rep)
{
        struct ib_recv_wr recv_wr, *recv_wr_fail;
        int rc;

        recv_wr.next = NULL;
        recv_wr.wr_id = (u64) (unsigned long) rep;
        recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        recv_wr.num_sge = 1;

        ib_dma_sync_single_for_cpu(ia->ri_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rdmab_length(rep->rr_rdmabuf),
                                   DMA_BIDIRECTIONAL);

        rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
        if (rc)
                dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
                        rc);
        return rc;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
        struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_rep *rep;
        int rc;

        while (count--) {
                spin_lock(&buffers->rb_lock);
                if (list_empty(&buffers->rb_recv_bufs))
                        goto out_reqbuf;
                rep = rpcrdma_buffer_get_rep_locked(buffers);
                spin_unlock(&buffers->rb_lock);

                rc = rpcrdma_ep_post_recv(ia, ep, rep);
                if (rc)
                        goto out_rc;
        }

        return 0;

out_reqbuf:
        spin_unlock(&buffers->rb_lock);
        pr_warn("%s: no extra receive buffers\n", __func__);
        return -ENOMEM;

out_rc:
        rpcrdma_recv_buffer_put(rep);
        return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        int bytes, segments;

        bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
        bytes -= RPCRDMA_HDRLEN_MIN;
        if (bytes < sizeof(struct rpcrdma_segment) * 2) {
                pr_warn("RPC: %s: inline threshold too small\n",
                        __func__);
                return 0;
        }

        segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
        dprintk("RPC: %s: max chunk list size = %d segments\n",
                __func__, segments);
        return segments;
}
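
/* Editor's note: a worked example for rpcrdma_max_segments(). Assuming
 * 1024-byte inline thresholds, an RPCRDMA_HDRLEN_MIN of 28 bytes, and
 * a 16-byte struct rpcrdma_segment (sizes assumed for illustration):
 *
 *	bytes    = 1024 - 28 = 996
 *	996 / 16 = 62 segments
 *	fls(62)  = 6, so segments = 1 << 5 = 32
 *
 * i.e. the segment count is rounded down to a power of two.
 */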