/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  39. #ifndef _LINUX_SUNRPC_XPRT_RDMA_H
  40. #define _LINUX_SUNRPC_XPRT_RDMA_H
  41. #include <linux/wait.h> /* wait_queue_head_t, etc */
  42. #include <linux/spinlock.h> /* spinlock_t, etc */
  43. #include <linux/atomic.h> /* atomic_t, etc */
  44. #include <linux/workqueue.h> /* struct work_struct */
  45. #include <rdma/rdma_cm.h> /* RDMA connection api */
  46. #include <rdma/ib_verbs.h> /* RDMA verbs api */
  47. #include <linux/sunrpc/clnt.h> /* rpc_xprt */
  48. #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */
  49. #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */
  50. #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */
  51. #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */
  52. #define RPCRDMA_BIND_TO (60U * HZ)
  53. #define RPCRDMA_INIT_REEST_TO (5U * HZ)
  54. #define RPCRDMA_MAX_REEST_TO (30U * HZ)
  55. #define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ)
  56. /*
  57. * Interface Adapter -- one per transport instance
  58. */
  59. struct rpcrdma_ia {
  60. const struct rpcrdma_memreg_ops *ri_ops;
  61. rwlock_t ri_qplock;
  62. struct ib_device *ri_device;
  63. struct rdma_cm_id *ri_id;
  64. struct ib_pd *ri_pd;
  65. struct ib_mr *ri_dma_mr;
  66. struct completion ri_done;
  67. int ri_async_rc;
  68. unsigned int ri_max_frmr_depth;
  69. struct ib_qp_attr ri_qp_attr;
  70. struct ib_qp_init_attr ri_qp_init_attr;
  71. };
  72. /*
  73. * RDMA Endpoint -- one per transport instance
  74. */
  75. struct rpcrdma_ep {
  76. atomic_t rep_cqcount;
  77. int rep_cqinit;
  78. int rep_connected;
  79. struct ib_qp_init_attr rep_attr;
  80. wait_queue_head_t rep_connect_wait;
  81. struct rdma_conn_param rep_remote_cma;
  82. struct sockaddr_storage rep_remote_addr;
  83. struct delayed_work rep_connect_worker;
  84. };
  85. #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
  86. #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
  87. /* Force completion handler to ignore the signal
  88. */
  89. #define RPCRDMA_IGNORE_COMPLETION (0ULL)
  90. /* Pre-allocate extra Work Requests for handling backward receives
  91. * and sends. This is a fixed value because the Work Queues are
  92. * allocated when the forward channel is set up.
  93. */
  94. #if defined(CONFIG_SUNRPC_BACKCHANNEL)
  95. #define RPCRDMA_BACKWARD_WRS (8)
  96. #else
  97. #define RPCRDMA_BACKWARD_WRS (0)
  98. #endif
  99. /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
  100. *
  101. * The below structure appears at the front of a large region of kmalloc'd
  102. * memory, which always starts on a good alignment boundary.
  103. */
  104. struct rpcrdma_regbuf {
  105. size_t rg_size;
  106. struct rpcrdma_req *rg_owner;
  107. struct ib_sge rg_iov;
  108. __be32 rg_base[0] __attribute__ ((aligned(256)));
  109. };
  110. static inline u64
  111. rdmab_addr(struct rpcrdma_regbuf *rb)
  112. {
  113. return rb->rg_iov.addr;
  114. }
  115. static inline u32
  116. rdmab_length(struct rpcrdma_regbuf *rb)
  117. {
  118. return rb->rg_iov.length;
  119. }
  120. static inline u32
  121. rdmab_lkey(struct rpcrdma_regbuf *rb)
  122. {
  123. return rb->rg_iov.lkey;
  124. }
  125. static inline struct rpcrdma_msg *
  126. rdmab_to_msg(struct rpcrdma_regbuf *rb)
  127. {
  128. return (struct rpcrdma_msg *)rb->rg_base;
  129. }
  130. #define RPCRDMA_DEF_GFP (GFP_NOIO | __GFP_NOWARN)
  131. /*
  132. * struct rpcrdma_rep -- this structure encapsulates state required to recv
  133. * and complete a reply, asychronously. It needs several pieces of
  134. * state:
  135. * o recv buffer (posted to provider)
  136. * o ib_sge (also donated to provider)
  137. * o status of reply (length, success or not)
  138. * o bookkeeping state to get run by tasklet (list, etc)
  139. *
  140. * These are allocated during initialization, per-transport instance;
  141. * however, the tasklet execution list itself is global, as it should
  142. * always be pretty short.
  143. *
  144. * N of these are associated with a transport instance, and stored in
  145. * struct rpcrdma_buffer. N is the max number of outstanding requests.
  146. */
  147. #define RPCRDMA_MAX_DATA_SEGS ((1 * 1024 * 1024) / PAGE_SIZE)
  148. #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
  149. struct rpcrdma_buffer;
  150. struct rpcrdma_rep {
  151. unsigned int rr_len;
  152. struct ib_device *rr_device;
  153. struct rpcrdma_xprt *rr_rxprt;
  154. struct work_struct rr_work;
  155. struct list_head rr_list;
  156. struct rpcrdma_regbuf *rr_rdmabuf;
  157. };
  158. #define RPCRDMA_BAD_LEN (~0U)
  159. /*
  160. * struct rpcrdma_mw - external memory region metadata
  161. *
  162. * An external memory region is any buffer or page that is registered
  163. * on the fly (ie, not pre-registered).
  164. *
  165. * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
  166. * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
  167. * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
  168. * track of registration metadata while each RPC is pending.
  169. * rpcrdma_deregister_external() uses this metadata to unmap and
  170. * release these resources when an RPC is complete.
  171. */
  172. enum rpcrdma_frmr_state {
  173. FRMR_IS_INVALID, /* ready to be used */
  174. FRMR_IS_VALID, /* in use */
  175. FRMR_IS_STALE, /* failed completion */
  176. };
  177. struct rpcrdma_frmr {
  178. struct scatterlist *sg;
  179. int sg_nents;
  180. struct ib_mr *fr_mr;
  181. enum rpcrdma_frmr_state fr_state;
  182. struct work_struct fr_work;
  183. struct rpcrdma_xprt *fr_xprt;
  184. bool fr_waiter;
  185. struct completion fr_linv_done;;
  186. union {
  187. struct ib_reg_wr fr_regwr;
  188. struct ib_send_wr fr_invwr;
  189. };
  190. };
  191. struct rpcrdma_fmr {
  192. struct ib_fmr *fmr;
  193. u64 *physaddrs;
  194. };
  195. struct rpcrdma_mw {
  196. union {
  197. struct rpcrdma_fmr fmr;
  198. struct rpcrdma_frmr frmr;
  199. } r;
  200. void (*mw_sendcompletion)(struct ib_wc *);
  201. struct list_head mw_list;
  202. struct list_head mw_all;
  203. };
  204. /*
  205. * struct rpcrdma_req -- structure central to the request/reply sequence.
  206. *
  207. * N of these are associated with a transport instance, and stored in
  208. * struct rpcrdma_buffer. N is the max number of outstanding requests.
  209. *
  210. * It includes pre-registered buffer memory for send AND recv.
  211. * The recv buffer, however, is not owned by this structure, and
  212. * is "donated" to the hardware when a recv is posted. When a
  213. * reply is handled, the recv buffer used is given back to the
  214. * struct rpcrdma_req associated with the request.
  215. *
  216. * In addition to the basic memory, this structure includes an array
  217. * of iovs for send operations. The reason is that the iovs passed to
  218. * ib_post_{send,recv} must not be modified until the work request
  219. * completes.
  220. *
  221. * NOTES:
  222. * o RPCRDMA_MAX_SEGS is the max number of addressible chunk elements we
  223. * marshal. The number needed varies depending on the iov lists that
  224. * are passed to us, the memory registration mode we are in, and if
  225. * physical addressing is used, the layout.
  226. */
  227. struct rpcrdma_mr_seg { /* chunk descriptors */
  228. struct rpcrdma_mw *rl_mw; /* registered MR */
  229. u64 mr_base; /* registration result */
  230. u32 mr_rkey; /* registration result */
  231. u32 mr_len; /* length of chunk or segment */
  232. int mr_nsegs; /* number of segments in chunk or 0 */
  233. enum dma_data_direction mr_dir; /* segment mapping direction */
  234. dma_addr_t mr_dma; /* segment mapping address */
  235. size_t mr_dmalen; /* segment mapping length */
  236. struct page *mr_page; /* owning page, if any */
  237. char *mr_offset; /* kva if no page, else offset */
  238. };
  239. #define RPCRDMA_MAX_IOVS (2)
  240. struct rpcrdma_req {
  241. struct list_head rl_free;
  242. unsigned int rl_niovs;
  243. unsigned int rl_nchunks;
  244. unsigned int rl_connect_cookie;
  245. struct rpcrdma_buffer *rl_buffer;
  246. struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
  247. struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS];
  248. struct rpcrdma_regbuf *rl_rdmabuf;
  249. struct rpcrdma_regbuf *rl_sendbuf;
  250. struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
  251. struct list_head rl_all;
  252. bool rl_backchannel;
  253. };
  254. static inline struct rpcrdma_req *
  255. rpcr_to_rdmar(struct rpc_rqst *rqst)
  256. {
  257. void *buffer = rqst->rq_buffer;
  258. struct rpcrdma_regbuf *rb;
  259. rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
  260. return rb->rg_owner;
  261. }
  262. /*
  263. * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
  264. * inline requests/replies, and client/server credits.
  265. *
  266. * One of these is associated with a transport instance
  267. */
  268. struct rpcrdma_buffer {
  269. spinlock_t rb_mwlock; /* protect rb_mws list */
  270. struct list_head rb_mws;
  271. struct list_head rb_all;
  272. char *rb_pool;
  273. spinlock_t rb_lock; /* protect buf lists */
  274. struct list_head rb_send_bufs;
  275. struct list_head rb_recv_bufs;
  276. u32 rb_max_requests;
  277. u32 rb_bc_srv_max_requests;
  278. spinlock_t rb_reqslock; /* protect rb_allreqs */
  279. struct list_head rb_allreqs;
  280. u32 rb_bc_max_requests;
  281. };
  282. #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
  283. /*
  284. * Internal structure for transport instance creation. This
  285. * exists primarily for modularity.
  286. *
  287. * This data should be set with mount options
  288. */
  289. struct rpcrdma_create_data_internal {
  290. struct sockaddr_storage addr; /* RDMA server address */
  291. unsigned int max_requests; /* max requests (slots) in flight */
  292. unsigned int rsize; /* mount rsize - max read hdr+data */
  293. unsigned int wsize; /* mount wsize - max write hdr+data */
  294. unsigned int inline_rsize; /* max non-rdma read data payload */
  295. unsigned int inline_wsize; /* max non-rdma write data payload */
  296. unsigned int padding; /* non-rdma write header padding */
  297. };
  298. #define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
  299. (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
  300. #define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
  301. (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
  302. #define RPCRDMA_INLINE_PAD_VALUE(rq)\
  303. rpcx_to_rdmad(rq->rq_xprt).padding
  304. /*
  305. * Statistics for RPCRDMA
  306. */
  307. struct rpcrdma_stats {
  308. unsigned long read_chunk_count;
  309. unsigned long write_chunk_count;
  310. unsigned long reply_chunk_count;
  311. unsigned long long total_rdma_request;
  312. unsigned long long total_rdma_reply;
  313. unsigned long long pullup_copy_count;
  314. unsigned long long fixup_copy_count;
  315. unsigned long hardway_register_count;
  316. unsigned long failed_marshal_count;
  317. unsigned long bad_reply_count;
  318. unsigned long nomsg_call_count;
  319. unsigned long bcall_count;
  320. };
  321. /*
  322. * Per-registration mode operations
  323. */
  324. struct rpcrdma_xprt;
  325. struct rpcrdma_memreg_ops {
  326. int (*ro_map)(struct rpcrdma_xprt *,
  327. struct rpcrdma_mr_seg *, int, bool);
  328. void (*ro_unmap_sync)(struct rpcrdma_xprt *,
  329. struct rpcrdma_req *);
  330. int (*ro_unmap)(struct rpcrdma_xprt *,
  331. struct rpcrdma_mr_seg *);
  332. int (*ro_open)(struct rpcrdma_ia *,
  333. struct rpcrdma_ep *,
  334. struct rpcrdma_create_data_internal *);
  335. size_t (*ro_maxpages)(struct rpcrdma_xprt *);
  336. int (*ro_init)(struct rpcrdma_xprt *);
  337. void (*ro_destroy)(struct rpcrdma_buffer *);
  338. const char *ro_displayname;
  339. };
  340. extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
  341. extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
  342. extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops;
  343. /*
  344. * RPCRDMA transport -- encapsulates the structures above for
  345. * integration with RPC.
  346. *
  347. * The contained structures are embedded, not pointers,
  348. * for convenience. This structure need not be visible externally.
  349. *
  350. * It is allocated and initialized during mount, and released
  351. * during unmount.
  352. */
  353. struct rpcrdma_xprt {
  354. struct rpc_xprt rx_xprt;
  355. struct rpcrdma_ia rx_ia;
  356. struct rpcrdma_ep rx_ep;
  357. struct rpcrdma_buffer rx_buf;
  358. struct rpcrdma_create_data_internal rx_data;
  359. struct delayed_work rx_connect_worker;
  360. struct rpcrdma_stats rx_stats;
  361. };
  362. #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
  363. #define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
  364. /* Setting this to 0 ensures interoperability with early servers.
  365. * Setting this to 1 enhances certain unaligned read/write performance.
  366. * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
  367. extern int xprt_rdma_pad_optimize;
  368. /*
  369. * Interface Adapter calls - xprtrdma/verbs.c
  370. */
  371. int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
  372. void rpcrdma_ia_close(struct rpcrdma_ia *);
  373. /*
  374. * Endpoint calls - xprtrdma/verbs.c
  375. */
  376. int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
  377. struct rpcrdma_create_data_internal *);
  378. void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
  379. int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
  380. void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
  381. int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
  382. struct rpcrdma_req *);
  383. int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
  384. struct rpcrdma_rep *);
  385. /*
  386. * Buffer calls - xprtrdma/verbs.c
  387. */
  388. struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
  389. struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
  390. void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *);
  391. int rpcrdma_buffer_create(struct rpcrdma_xprt *);
  392. void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
  393. struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
  394. void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
  395. struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
  396. void rpcrdma_buffer_put(struct rpcrdma_req *);
  397. void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
  398. void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
  399. struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
  400. size_t, gfp_t);
  401. void rpcrdma_free_regbuf(struct rpcrdma_ia *,
  402. struct rpcrdma_regbuf *);
  403. unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
  404. int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
  405. int frwr_alloc_recovery_wq(void);
  406. void frwr_destroy_recovery_wq(void);
  407. int rpcrdma_alloc_wq(void);
  408. void rpcrdma_destroy_wq(void);
  409. /*
  410. * Wrappers for chunk registration, shared by read/write chunk code.
  411. */
  412. void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);
  413. static inline enum dma_data_direction
  414. rpcrdma_data_dir(bool writing)
  415. {
  416. return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  417. }
  418. static inline void
  419. rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
  420. enum dma_data_direction direction)
  421. {
  422. seg->mr_dir = direction;
  423. seg->mr_dmalen = seg->mr_len;
  424. if (seg->mr_page)
  425. seg->mr_dma = ib_dma_map_page(device,
  426. seg->mr_page, offset_in_page(seg->mr_offset),
  427. seg->mr_dmalen, seg->mr_dir);
  428. else
  429. seg->mr_dma = ib_dma_map_single(device,
  430. seg->mr_offset,
  431. seg->mr_dmalen, seg->mr_dir);
  432. if (ib_dma_mapping_error(device, seg->mr_dma))
  433. rpcrdma_mapping_error(seg);
  434. }
  435. static inline void
  436. rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
  437. {
  438. if (seg->mr_page)
  439. ib_dma_unmap_page(device,
  440. seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
  441. else
  442. ib_dma_unmap_single(device,
  443. seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
  444. }
  445. /*
  446. * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
  447. */
  448. void rpcrdma_connect_worker(struct work_struct *);
  449. void rpcrdma_conn_func(struct rpcrdma_ep *);
  450. void rpcrdma_reply_handler(struct rpcrdma_rep *);
  451. /*
  452. * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  453. */
  454. int rpcrdma_marshal_req(struct rpc_rqst *);
  455. /* RPC/RDMA module init - xprtrdma/transport.c
  456. */
  457. extern unsigned int xprt_rdma_max_inline_read;
  458. void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
  459. void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
  460. void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
  461. int xprt_rdma_init(void);
  462. void xprt_rdma_cleanup(void);
  463. /* Backchannel calls - xprtrdma/backchannel.c
  464. */
  465. #if defined(CONFIG_SUNRPC_BACKCHANNEL)
  466. int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
  467. int xprt_rdma_bc_up(struct svc_serv *, struct net *);
  468. int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
  469. void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
  470. int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
  471. void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
  472. void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
  473. #endif /* CONFIG_SUNRPC_BACKCHANNEL */
  474. extern struct xprt_class xprt_rdma_bc;
  475. #endif /* _LINUX_SUNRPC_XPRT_RDMA_H */