xprt_rdma.h
  1. /*
  2. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the BSD-type
  8. * license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. *
  14. * Redistributions of source code must retain the above copyright
  15. * notice, this list of conditions and the following disclaimer.
  16. *
  17. * Redistributions in binary form must reproduce the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer in the documentation and/or other materials provided
  20. * with the distribution.
  21. *
  22. * Neither the name of the Network Appliance, Inc. nor the names of
  23. * its contributors may be used to endorse or promote products
  24. * derived from this software without specific prior written
  25. * permission.
  26. *
  27. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38. */
  39. #ifndef _LINUX_SUNRPC_XPRT_RDMA_H
  40. #define _LINUX_SUNRPC_XPRT_RDMA_H
  41. #include <linux/wait.h> /* wait_queue_head_t, etc */
  42. #include <linux/spinlock.h> /* spinlock_t, etc */
  43. #include <linux/atomic.h> /* atomic_t, etc */
  44. #include <linux/workqueue.h> /* struct work_struct */
  45. #include <rdma/rdma_cm.h> /* RDMA connection api */
  46. #include <rdma/ib_verbs.h> /* RDMA verbs api */
  47. #include <linux/sunrpc/clnt.h> /* rpc_xprt */
  48. #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */
  49. #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */
  50. #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */
  51. #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */
  52. #define RPCRDMA_BIND_TO (60U * HZ)
  53. #define RPCRDMA_INIT_REEST_TO (5U * HZ)
  54. #define RPCRDMA_MAX_REEST_TO (30U * HZ)
  55. #define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ)
/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops *ri_ops; /* memory registration strategy (FMR or FRWR ops table) */
	struct ib_device *ri_device;		/* underlying RDMA device */
	struct rdma_cm_id *ri_id;		/* RDMA CM connection identifier */
	struct ib_pd *ri_pd;			/* protection domain */
	struct completion ri_done;		/* NOTE(review): presumably signals async CM resolution -- confirm in verbs.c */
	struct completion ri_remove_done;	/* signals that device removal has finished */
	int ri_async_rc;			/* result code delivered by async events */
	unsigned int ri_max_segs;		/* cap on RDMA segments per request */
	unsigned int ri_max_frmr_depth;		/* max pages per FRMR registration */
	unsigned int ri_max_inline_write;	/* largest inline write payload */
	unsigned int ri_max_inline_read;	/* largest inline read payload */
	unsigned int ri_max_send_sges;		/* SGEs available per Send WR */
	bool ri_reminv_expected;		/* NOTE(review): looks like "peer may remotely invalidate" -- verify */
	bool ri_implicit_roundup;
	enum ib_mr_type ri_mrtype;		/* MR type used when allocating MRs */
	unsigned long ri_flags;			/* RPCRDMA_IAF_* bit flags */
	struct ib_qp_attr ri_qp_attr;
	struct ib_qp_init_attr ri_qp_init_attr;
};
/* Bit numbers for rpcrdma_ia::ri_flags */
enum {
	RPCRDMA_IAF_REMOVING = 0,	/* underlying device is being removed; see rpcrdma_ia_remove() */
};
/*
 * RDMA Endpoint -- one per transport instance
 */
struct rpcrdma_ep {
	atomic_t rep_cqcount;		/* sends remaining before a signaled completion
					 * is requested; see rpcrdma_set_signaled() */
	int rep_cqinit;			/* reload value for rep_cqcount */
	int rep_connected;		/* connection state */
	struct ib_qp_init_attr rep_attr;
	wait_queue_head_t rep_connect_wait;	/* waiters for connection state changes */
	struct rpcrdma_connect_private rep_cm_private;	/* RPC-over-RDMA CM private data */
	struct rdma_conn_param rep_remote_cma;
	struct sockaddr_storage rep_remote_addr;	/* peer's address */
	struct delayed_work rep_connect_worker;
};
  96. static inline void
  97. rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
  98. {
  99. atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
  100. }
  101. /* To update send queue accounting, provider must take a
  102. * send completion every now and then.
  103. */
  104. static inline void
  105. rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
  106. {
  107. send_wr->send_flags = 0;
  108. if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
  109. rpcrdma_init_cqcount(ep, 0);
  110. send_wr->send_flags = IB_SEND_SIGNALED;
  111. }
  112. }
  113. /* Pre-allocate extra Work Requests for handling backward receives
  114. * and sends. This is a fixed value because the Work Queues are
  115. * allocated when the forward channel is set up.
  116. */
  117. #if defined(CONFIG_SUNRPC_BACKCHANNEL)
  118. #define RPCRDMA_BACKWARD_WRS (8)
  119. #else
  120. #define RPCRDMA_BACKWARD_WRS (0)
  121. #endif
  122. /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
  123. *
  124. * The below structure appears at the front of a large region of kmalloc'd
  125. * memory, which always starts on a good alignment boundary.
  126. */
  127. struct rpcrdma_regbuf {
  128. struct ib_sge rg_iov;
  129. struct ib_device *rg_device;
  130. enum dma_data_direction rg_direction;
  131. __be32 rg_base[0] __attribute__ ((aligned(256)));
  132. };
  133. static inline u64
  134. rdmab_addr(struct rpcrdma_regbuf *rb)
  135. {
  136. return rb->rg_iov.addr;
  137. }
  138. static inline u32
  139. rdmab_length(struct rpcrdma_regbuf *rb)
  140. {
  141. return rb->rg_iov.length;
  142. }
  143. static inline u32
  144. rdmab_lkey(struct rpcrdma_regbuf *rb)
  145. {
  146. return rb->rg_iov.lkey;
  147. }
  148. static inline struct rpcrdma_msg *
  149. rdmab_to_msg(struct rpcrdma_regbuf *rb)
  150. {
  151. return (struct rpcrdma_msg *)rb->rg_base;
  152. }
  153. static inline struct ib_device *
  154. rdmab_device(struct rpcrdma_regbuf *rb)
  155. {
  156. return rb->rg_device;
  157. }
/* Allocation flags for regbufs: avoid I/O recursion, suppress OOM warnings */
#define RPCRDMA_DEF_GFP (GFP_NOIO | __GFP_NOWARN)
/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the header is another 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 8,	/* cap on segments per header chunk list */
	RPCRDMA_HDRBUF_SIZE = 256,	/* size of the transport header buffer */
};
/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 * o recv buffer (posted to provider)
 * o ib_sge (also donated to provider)
 * o status of reply (length, success or not)
 * o bookkeeping state to get run by reply handler (list, etc)
 *
 * These are allocated during initialization, per-transport instance.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */
struct rpcrdma_rep {
	struct ib_cqe rr_cqe;		/* completion-queue entry for this recv */
	unsigned int rr_len;		/* bytes received, or RPCRDMA_BAD_LEN */
	int rr_wc_flags;		/* work-completion flags from the provider */
	u32 rr_inv_rkey;		/* NOTE(review): presumably rkey invalidated by peer -- confirm */
	struct rpcrdma_xprt *rr_rxprt;	/* owning transport */
	struct work_struct rr_work;	/* defers reply processing to a workqueue */
	struct list_head rr_list;
	struct ib_recv_wr rr_recv_wr;	/* receive WR posted to the provider */
	struct rpcrdma_regbuf *rr_rdmabuf;	/* the receive buffer itself */
};
/* rr_len value marking an unusable reply */
#define RPCRDMA_BAD_LEN (~0U)
/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
/* Lifecycle states for an FRMR */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID, /* ready to be used */
	FRMR_IS_VALID, /* in use */
	FRMR_FLUSHED_FR, /* flushed FASTREG WR */
	FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
};
/* Per-MW state for the FRWR registration strategy */
struct rpcrdma_frmr {
	struct ib_mr *fr_mr;		/* the memory region itself */
	struct ib_cqe fr_cqe;		/* completion-queue entry */
	enum rpcrdma_frmr_state fr_state;	/* see enum rpcrdma_frmr_state above */
	struct completion fr_linv_done;	/* NOTE(review): presumably signals LOCAL_INV completion -- confirm */
	union {				/* only one WR type is in flight at a time */
		struct ib_reg_wr fr_regwr;	/* FASTREG work request */
		struct ib_send_wr fr_invwr;	/* LOCAL_INV work request */
	};
};
/* Per-MW state for the FMR registration strategy */
struct rpcrdma_fmr {
	struct ib_fmr *fm_mr;		/* the fast memory region itself */
	u64 *fm_physaddrs;		/* NOTE(review): presumably page addresses for mapping -- confirm in fmr_ops.c */
};
/* Memory window: tracks one on-the-fly registration (see comment above) */
struct rpcrdma_mw {
	struct list_head mw_list;	/* linkage on rb_mws / per-req lists */
	struct scatterlist *mw_sg;	/* scatterlist of pages being registered */
	int mw_nents;			/* entries in mw_sg */
	enum dma_data_direction mw_dir;	/* DMA direction of the mapping */
	unsigned long mw_flags;		/* RPCRDMA_MW_F_* bits */
	union {				/* strategy-specific state */
		struct rpcrdma_fmr fmr;
		struct rpcrdma_frmr frmr;
	};
	struct rpcrdma_xprt *mw_xprt;	/* owning transport */
	u32 mw_handle;			/* registration handle (rkey) advertised to the peer */
	u32 mw_length;			/* length of the registered region */
	u64 mw_offset;			/* offset of the registered region */
	struct list_head mw_all;	/* linkage on rb_all (every MW ever created) */
};
/* mw_flags: bit values for rpcrdma_mw::mw_flags */
enum {
	RPCRDMA_MW_F_RI = 1,	/* NOTE(review): semantics set in memreg ops -- confirm before relying on */
};
  260. /*
  261. * struct rpcrdma_req -- structure central to the request/reply sequence.
  262. *
  263. * N of these are associated with a transport instance, and stored in
  264. * struct rpcrdma_buffer. N is the max number of outstanding requests.
  265. *
  266. * It includes pre-registered buffer memory for send AND recv.
  267. * The recv buffer, however, is not owned by this structure, and
  268. * is "donated" to the hardware when a recv is posted. When a
  269. * reply is handled, the recv buffer used is given back to the
  270. * struct rpcrdma_req associated with the request.
  271. *
  272. * In addition to the basic memory, this structure includes an array
  273. * of iovs for send operations. The reason is that the iovs passed to
  274. * ib_post_{send,recv} must not be modified until the work request
  275. * completes.
  276. */
/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS = 3,	/* NOTE(review): presumably head/pages/tail iovecs -- confirm */
	RPCRDMA_MAX_DATA_SEGS = ((1 * 1024 * 1024) / PAGE_SIZE) + 1,	/* 1MB of payload pages, +1 for misalignment */
	RPCRDMA_MAX_SEGS = RPCRDMA_MAX_DATA_SEGS +
			   RPCRDMA_MAX_IOV_SEGS,
};
/* One element of a chunk list: a contiguous piece of the RPC payload */
struct rpcrdma_mr_seg { /* chunk descriptors */
	u32 mr_len; /* length of chunk or segment */
	struct page *mr_page; /* owning page, if any */
	char *mr_offset; /* kva if no page, else offset */
};
/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,	/* header + head iovec + tail iovec */
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,	/* pages of inline payload */
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,	/* hdr + head + pages + tail */
};
  306. struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head rl_list;	/* linkage on rb_send_bufs / rb_pending */
	__be32 rl_xid;			/* XID of the RPC this req carries */
	unsigned int rl_mapped_sges;	/* count of DMA-mapped send SGEs */
	unsigned int rl_connect_cookie;	/* NOTE(review): presumably detects reconnects -- confirm */
	struct rpcrdma_buffer *rl_buffer;	/* owning buffer pool */
	struct rpcrdma_rep *rl_reply;	/* matched reply, if any */
	struct ib_send_wr rl_send_wr;	/* Send WR posted for this request */
	struct ib_sge rl_send_sge[RPCRDMA_MAX_SEND_SGES];
	struct rpcrdma_regbuf *rl_rdmabuf; /* xprt header */
	struct rpcrdma_regbuf *rl_sendbuf; /* rq_snd_buf */
	struct rpcrdma_regbuf *rl_recvbuf; /* rq_rcv_buf */
	struct ib_cqe rl_cqe;
	struct list_head rl_all;	/* linkage on rb_allreqs */
	bool rl_backchannel;		/* true for backchannel requests */
	struct list_head rl_registered; /* registered segments */
	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
  325. static inline void
  326. rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
  327. {
  328. rqst->rq_xprtdata = req;
  329. }
  330. static inline struct rpcrdma_req *
  331. rpcr_to_rdmar(struct rpc_rqst *rqst)
  332. {
  333. return rqst->rq_xprtdata;
  334. }
  335. static inline void
  336. rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
  337. {
  338. list_add_tail(&mw->mw_list, list);
  339. }
  340. static inline struct rpcrdma_mw *
  341. rpcrdma_pop_mw(struct list_head *list)
  342. {
  343. struct rpcrdma_mw *mw;
  344. mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
  345. list_del(&mw->mw_list);
  346. return mw;
  347. }
/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t rb_mwlock; /* protect rb_mws list */
	struct list_head rb_mws;	/* free MWs */
	struct list_head rb_all;	/* every MW, free or in use */
	spinlock_t rb_lock; /* protect buf lists */
	int rb_send_count, rb_recv_count;	/* outstanding send/recv buffers */
	struct list_head rb_send_bufs;	/* free rpcrdma_reqs */
	struct list_head rb_recv_bufs;	/* free rpcrdma_reps */
	struct list_head rb_pending;	/* reqs awaiting replies; see rpcrdma_insert_req() */
	u32 rb_max_requests;	/* forward-channel slot count */
	atomic_t rb_credits; /* most recent credit grant */
	u32 rb_bc_srv_max_requests;	/* backchannel server slots */
	spinlock_t rb_reqslock; /* protect rb_allreqs */
	struct list_head rb_allreqs;	/* every rpcrdma_req ever created */
	u32 rb_bc_max_requests;	/* backchannel client slots */
	spinlock_t rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head rb_stale_mrs;	/* MRs awaiting recovery */
	struct delayed_work rb_recovery_worker;	/* recovers stale MRs */
	struct delayed_work rb_refresh_worker;	/* replenishes the MW free list */
};
  374. #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage addr; /* RDMA server address */
	unsigned int max_requests; /* max requests (slots) in flight */
	unsigned int rsize; /* mount rsize - max read hdr+data */
	unsigned int wsize; /* mount wsize - max write hdr+data */
	unsigned int inline_rsize; /* max non-rdma read data payload */
	unsigned int inline_wsize; /* max non-rdma write data payload */
	unsigned int padding; /* non-rdma write header padding */
};
/*
 * Statistics for RPCRDMA
 *
 * Counters are cumulative for the life of the transport;
 * exported via xprt_rdma_print_stats().
 */
struct rpcrdma_stats {
	/* chunk usage */
	unsigned long read_chunk_count;
	unsigned long write_chunk_count;
	unsigned long reply_chunk_count;
	/* total bytes moved in each direction */
	unsigned long long total_rdma_request;
	unsigned long long total_rdma_reply;
	/* copy-avoidance accounting */
	unsigned long long pullup_copy_count;
	unsigned long long fixup_copy_count;
	unsigned long hardway_register_count;
	/* error and special-case counters */
	unsigned long failed_marshal_count;
	unsigned long bad_reply_count;
	unsigned long nomsg_call_count;
	unsigned long bcall_count;
	/* MR lifecycle counters */
	unsigned long mrs_recovered;
	unsigned long mrs_orphaned;
	unsigned long mrs_allocated;
	unsigned long local_inv_needed;
};
/*
 * Per-registration mode operations
 *
 * One ops table per memory registration strategy (FMR or FRWR);
 * selected at IA open time and stored in rpcrdma_ia::ri_ops.
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int (*ro_map)(struct rpcrdma_xprt *,
		      struct rpcrdma_mr_seg *, int, bool,
		      struct rpcrdma_mw **);	/* register a run of segments */
	void (*ro_unmap_sync)(struct rpcrdma_xprt *,
			      struct list_head *);	/* invalidate MWs, waiting for completion */
	void (*ro_unmap_safe)(struct rpcrdma_xprt *,
			      struct rpcrdma_req *, bool);	/* NOTE(review): non-blocking variant? confirm */
	void (*ro_recover_mr)(struct rpcrdma_mw *);	/* reset a flushed/stale MR */
	int (*ro_open)(struct rpcrdma_ia *,
		       struct rpcrdma_ep *,
		       struct rpcrdma_create_data_internal *);	/* strategy-specific setup */
	size_t (*ro_maxpages)(struct rpcrdma_xprt *);	/* max payload pages per RPC */
	int (*ro_init_mr)(struct rpcrdma_ia *,
			  struct rpcrdma_mw *);	/* allocate resources for one MW */
	void (*ro_release_mr)(struct rpcrdma_mw *);	/* free one MW's resources */
	const char *ro_displayname;	/* human-readable strategy name */
	const int ro_send_w_inv_ok;	/* nonzero if Send With Invalidate may be used */
};
  434. extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
  435. extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt rx_xprt;	/* generic RPC transport; must be first for container_of */
	struct rpcrdma_ia rx_ia;	/* interface adapter */
	struct rpcrdma_ep rx_ep;	/* RDMA endpoint */
	struct rpcrdma_buffer rx_buf;	/* pre-registered buffers and MWs */
	struct rpcrdma_create_data_internal rx_data;	/* mount parameters */
	struct delayed_work rx_connect_worker;
	struct rpcrdma_stats rx_stats;
};
  455. #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
  456. #define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
  457. /* Setting this to 0 ensures interoperability with early servers.
  458. * Setting this to 1 enhances certain unaligned read/write performance.
  459. * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
  460. extern int xprt_rdma_pad_optimize;
  461. /* This setting controls the hunt for a supported memory
  462. * registration strategy.
  463. */
  464. extern unsigned int xprt_rdma_memreg_strategy;
  465. /*
  466. * Interface Adapter calls - xprtrdma/verbs.c
  467. */
  468. int rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr);
  469. void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
  470. void rpcrdma_ia_close(struct rpcrdma_ia *);
  471. bool frwr_is_supported(struct rpcrdma_ia *);
  472. bool fmr_is_supported(struct rpcrdma_ia *);
  473. /*
  474. * Endpoint calls - xprtrdma/verbs.c
  475. */
  476. int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
  477. struct rpcrdma_create_data_internal *);
  478. void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
  479. int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
  480. void rpcrdma_conn_func(struct rpcrdma_ep *ep);
  481. void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
  482. int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
  483. struct rpcrdma_req *);
  484. int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);
  485. /*
  486. * Buffer calls - xprtrdma/verbs.c
  487. */
  488. struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
  489. struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
  490. void rpcrdma_destroy_req(struct rpcrdma_req *);
  491. int rpcrdma_buffer_create(struct rpcrdma_xprt *);
  492. void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
  493. static inline void
  494. rpcrdma_insert_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
  495. {
  496. spin_lock(&buffers->rb_lock);
  497. if (list_empty(&req->rl_list))
  498. list_add_tail(&req->rl_list, &buffers->rb_pending);
  499. spin_unlock(&buffers->rb_lock);
  500. }
  501. static inline struct rpcrdma_req *
  502. rpcrdma_lookup_req_locked(struct rpcrdma_buffer *buffers, __be32 xid)
  503. {
  504. struct rpcrdma_req *pos;
  505. list_for_each_entry(pos, &buffers->rb_pending, rl_list)
  506. if (pos->rl_xid == xid)
  507. return pos;
  508. return NULL;
  509. }
  510. static inline void
  511. rpcrdma_remove_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
  512. {
  513. spin_lock(&buffers->rb_lock);
  514. list_del(&req->rl_list);
  515. spin_unlock(&buffers->rb_lock);
  516. }
  517. struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
  518. void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
  519. struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
  520. void rpcrdma_buffer_put(struct rpcrdma_req *);
  521. void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
  522. void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
  523. void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);
  524. struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
  525. gfp_t);
  526. bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
  527. void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);
  528. static inline bool
  529. rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
  530. {
  531. return rb->rg_device != NULL;
  532. }
  533. static inline bool
  534. rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
  535. {
  536. if (likely(rpcrdma_regbuf_is_mapped(rb)))
  537. return true;
  538. return __rpcrdma_dma_map_regbuf(ia, rb);
  539. }
  540. int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
  541. int rpcrdma_alloc_wq(void);
  542. void rpcrdma_destroy_wq(void);
  543. /*
  544. * Wrappers for chunk registration, shared by read/write chunk code.
  545. */
  546. static inline enum dma_data_direction
  547. rpcrdma_data_dir(bool writing)
  548. {
  549. return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  550. }
/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
/* How an RPC's payload is conveyed on the wire */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunk: message fits inline */
	rpcrdma_readch,		/* Read chunk (data moves client-to-server) */
	rpcrdma_areadch,	/* NOTE(review): presumably whole message as Read chunks -- confirm */
	rpcrdma_writech,	/* Write chunk (data moves server-to-client) */
	rpcrdma_replych		/* Reply chunk (whole reply via RDMA Write) */
};
  561. bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
  562. u32, struct xdr_buf *, enum rpcrdma_chunktype);
  563. void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
  564. int rpcrdma_marshal_req(struct rpc_rqst *);
  565. void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
  566. void rpcrdma_reply_handler(struct work_struct *work);
  567. /* RPC/RDMA module init - xprtrdma/transport.c
  568. */
  569. extern unsigned int xprt_rdma_max_inline_read;
  570. void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
  571. void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
  572. void rpcrdma_connect_worker(struct work_struct *work);
  573. void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
  574. int xprt_rdma_init(void);
  575. void xprt_rdma_cleanup(void);
  576. /* Backchannel calls - xprtrdma/backchannel.c
  577. */
  578. #if defined(CONFIG_SUNRPC_BACKCHANNEL)
  579. int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
  580. int xprt_rdma_bc_up(struct svc_serv *, struct net *);
  581. size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
  582. int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
  583. void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
  584. int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
  585. void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
  586. void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
  587. #endif /* CONFIG_SUNRPC_BACKCHANNEL */
  588. extern struct xprt_class xprt_rdma_bc;
  589. #endif /* _LINUX_SUNRPC_XPRT_RDMA_H */