/* xprt_rdma.h -- client-side RPC-over-RDMA transport internals */
  1. /*
  2. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the BSD-type
  8. * license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. *
  14. * Redistributions of source code must retain the above copyright
  15. * notice, this list of conditions and the following disclaimer.
  16. *
  17. * Redistributions in binary form must reproduce the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer in the documentation and/or other materials provided
  20. * with the distribution.
  21. *
  22. * Neither the name of the Network Appliance, Inc. nor the names of
  23. * its contributors may be used to endorse or promote products
  24. * derived from this software without specific prior written
  25. * permission.
  26. *
  27. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38. */
  39. #ifndef _LINUX_SUNRPC_XPRT_RDMA_H
  40. #define _LINUX_SUNRPC_XPRT_RDMA_H
  41. #include <linux/wait.h> /* wait_queue_head_t, etc */
  42. #include <linux/spinlock.h> /* spinlock_t, etc */
  43. #include <linux/atomic.h> /* atomic_t, etc */
  44. #include <linux/workqueue.h> /* struct work_struct */
  45. #include <rdma/rdma_cm.h> /* RDMA connection api */
  46. #include <rdma/ib_verbs.h> /* RDMA verbs api */
  47. #include <linux/sunrpc/clnt.h> /* rpc_xprt */
  48. #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */
  49. #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */
  50. #include <linux/sunrpc/svc.h> /* RPCSVC_MAXPAYLOAD */
  51. #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */
  52. #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */
/*
 * Interface Adapter -- one per transport instance
 *
 * Wraps the RDMA CM identity, protection domain, and memory-
 * registration state shared by every request on this transport.
 */
struct rpcrdma_ia {
	rwlock_t		ri_qplock;	/* guards use of ri_id's QP; NOTE(review): presumably held across QP replacement on reconnect -- confirm in verbs.c */
	struct rdma_cm_id	*ri_id;		/* RDMA CM connection identity */
	struct ib_pd		*ri_pd;		/* protection domain for all MRs/QPs */
	struct ib_mr		*ri_bind_mem;	/* MR for bind operations; NOTE(review): registration-mode dependent -- confirm */
	u32			ri_dma_lkey;	/* device local DMA lkey; valid only if ri_have_dma_lkey */
	int			ri_have_dma_lkey; /* nonzero when ri_dma_lkey is usable */
	struct completion	ri_done;	/* completed by async CM event handling */
	int			ri_async_rc;	/* status delivered with ri_done */
	enum rpcrdma_memreg	ri_memreg_strategy; /* memory registration mode in effect */
	unsigned int		ri_max_frmr_depth;  /* max page-list depth for FRMRs */
	struct ib_device_attr	ri_devattr;	/* cached device attributes */
	struct ib_qp_attr	ri_qp_attr;	/* scratch space for QP attribute queries */
	struct ib_qp_init_attr	ri_qp_init_attr; /* scratch space for QP init attributes */
};
/*
 * RDMA Endpoint -- one per transport instance
 */

/* Completion budget and ib_poll_cq() batch size used by the CQ
 * handlers; NOTE(review): exact budget semantics live in verbs.c. */
#define RPCRDMA_WC_BUDGET	(128)
#define RPCRDMA_POLLSIZE	(16)

struct rpcrdma_ep {
	atomic_t		rep_cqcount;	/* countdown toward next signaled SEND (see DECR_CQCOUNT) */
	int			rep_cqinit;	/* reload value for rep_cqcount (see INIT_CQCOUNT) */
	int			rep_connected;	/* connection state / negative errno */
	struct ib_qp_init_attr	rep_attr;	/* attributes the QP was created with */
	wait_queue_head_t	rep_connect_wait; /* woken when connect state changes */
	struct rpcrdma_regbuf	*rep_padbuf;	/* registered buffer for write padding */
	struct rdma_conn_param	rep_remote_cma;	/* CM parameters for rdma_connect() */
	struct sockaddr_storage	rep_remote_addr; /* server's address */
	struct delayed_work	rep_connect_worker; /* deferred (re)connect work */
	struct ib_wc		rep_send_wcs[RPCRDMA_POLLSIZE]; /* send CQ polling array */
	struct ib_wc		rep_recv_wcs[RPCRDMA_POLLSIZE]; /* recv CQ polling array */
};
/*
 * Force a signaled SEND Work Request every so often,
 * in case the provider needs to do some housekeeping.
 */
#define RPCRDMA_MAX_UNSIGNALED_SENDS	(32)

/* Reset the endpoint's send countdown to its configured threshold. */
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
/* Atomically decrement the countdown and return the new value;
 * NOTE(review): callers presumably signal the SEND when this hits
 * zero -- confirm against the post-send path in verbs.c. */
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */
struct rpcrdma_regbuf {
	size_t			rg_size;	/* size of the usable data area */
	struct rpcrdma_req	*rg_owner;	/* req this buffer belongs to (see rpcr_to_rdmar) */
	struct ib_mr		*rg_mr;		/* MR covering this buffer, when one is needed */
	struct ib_sge		rg_iov;		/* addr/length/lkey handed to work requests */
	/* Start of the data area itself; 256-byte aligned.  Zero-length
	 * array: the data is allocated contiguously after this header. */
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};
  108. static inline u64
  109. rdmab_addr(struct rpcrdma_regbuf *rb)
  110. {
  111. return rb->rg_iov.addr;
  112. }
  113. static inline u32
  114. rdmab_length(struct rpcrdma_regbuf *rb)
  115. {
  116. return rb->rg_iov.length;
  117. }
  118. static inline u32
  119. rdmab_lkey(struct rpcrdma_regbuf *rb)
  120. {
  121. return rb->rg_iov.lkey;
  122. }
  123. static inline struct rpcrdma_msg *
  124. rdmab_to_msg(struct rpcrdma_regbuf *rb)
  125. {
  126. return (struct rpcrdma_msg *)rb->rg_base;
  127. }
/* Chunk-list type chosen when marshaling an RPC call or reply;
 * NOTE(review): precise selection rules live in rpc_rdma.c. */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunk list */
	rpcrdma_readch,		/* read chunk list */
	rpcrdma_areadch,	/* NOTE(review): presumably "all-read" variant -- confirm in rpc_rdma.c */
	rpcrdma_writech,	/* write chunk list */
	rpcrdma_replych		/* reply chunk */
};
/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 * o recv buffer (posted to provider)
 * o ib_sge (also donated to provider)
 * o status of reply (length, success or not)
 * o bookkeeping state to get run by tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

/* temporary static scatter/gather max */
#define RPCRDMA_MAX_DATA_SEGS	(64)	/* max scatter/gather */
#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */

struct rpcrdma_buffer;

struct rpcrdma_rep {
	unsigned int		rr_len;		/* actual received reply length */
	struct rpcrdma_buffer	*rr_buffer;	/* buffer pool this rep came from */
	struct rpc_xprt		*rr_xprt;	/* transport for reply completion */
	void (*rr_func)(struct rpcrdma_rep *);	/* called by tasklet in softirq */
	struct list_head	rr_list;	/* tasklet/reply list linkage */
	struct rpcrdma_regbuf	*rr_rdmabuf;	/* registered recv buffer */
};
/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */

/* Lifecycle of a fast-register MR (FRMR). */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_IS_STALE,		/* failed completion */
};
/* Per-MW state for the FRMR registration mode. */
struct rpcrdma_frmr {
	struct ib_fast_reg_page_list	*fr_pgl;	/* page list for fast_reg WRs */
	struct ib_mr			*fr_mr;		/* the fast-register MR itself */
	enum rpcrdma_frmr_state		fr_state;	/* see enum above */
};
/* One externally-registered memory region; either an FMR or an FRMR
 * depending on the transport's registration strategy. */
struct rpcrdma_mw {
	union {
		struct ib_fmr		*fmr;	/* FMR mode */
		struct rpcrdma_frmr	frmr;	/* FRMR mode */
	} r;
	struct list_head	mw_list;	/* free-list linkage (rb_mws) */
	struct list_head	mw_all;		/* linkage on the all-MWs list (rb_all) */
};
/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 * o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 * marshal. The number needed varies depending on the iov lists that
 * are passed to us, the memory registration mode we are in, and if
 * physical addressing is used, the layout.
 */

struct rpcrdma_mr_seg {			/* chunk descriptors */
	struct rpcrdma_mw *rl_mw;	/* registered MR */
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};
/* Per-RPC request state; see the block comment above for ownership
 * of the send and recv buffers. */
struct rpcrdma_req {
	unsigned int	rl_niovs;	/* 0, 2 or 4 */
	unsigned int	rl_nchunks;	/* non-zero if chunks */
	unsigned int	rl_connect_cookie;	/* retry detection */
	enum rpcrdma_chunktype	rl_rtype, rl_wtype; /* read/write chunk types chosen at marshal time */
	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
	struct ib_sge	rl_send_iov[4];	/* for active requests */
	struct rpcrdma_regbuf *rl_rdmabuf; /* registered RPC/RDMA header buffer */
	struct rpcrdma_regbuf *rl_sendbuf; /* registered RPC call/send buffer */
	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; /* chunk segment array */
};
  241. static inline struct rpcrdma_req *
  242. rpcr_to_rdmar(struct rpc_rqst *rqst)
  243. {
  244. void *buffer = rqst->rq_buffer;
  245. struct rpcrdma_regbuf *rb;
  246. rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
  247. return rb->rg_owner;
  248. }
/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t	rb_lock;	/* protects indexes */
	u32		rb_max_requests;/* client max requests */
	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
	struct list_head rb_all;	/* every MW ever allocated, for teardown */
	int		rb_send_index;	/* next free slot in rb_send_bufs */
	struct rpcrdma_req	**rb_send_bufs;	/* pool of rpcrdma_req */
	int		rb_recv_index;	/* next free slot in rb_recv_bufs */
	struct rpcrdma_rep	**rb_recv_bufs;	/* pool of rpcrdma_rep */
	char		*rb_pool;	/* backing allocation for the pools */
};

/* Recover the interface adapter from an embedded buffer pool. */
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;		/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};
  282. #define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
  283. (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
  284. #define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
  285. (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
  286. #define RPCRDMA_INLINE_PAD_VALUE(rq)\
  287. rpcx_to_rdmad(rq->rq_xprt).padding
/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;	/* calls sent with read chunks */
	unsigned long		write_chunk_count;	/* calls sent with write chunks */
	unsigned long		reply_chunk_count;	/* calls sent with a reply chunk */
	unsigned long long	total_rdma_request;	/* bytes in RDMA requests */
	unsigned long long	total_rdma_reply;	/* bytes in RDMA replies */
	unsigned long long	pullup_copy_count;	/* bytes copied to pull up a send */
	unsigned long long	fixup_copy_count;	/* bytes copied to fix up a reply */
	unsigned long		hardway_register_count;	/* NOTE(review): presumably on-demand buffer registrations -- confirm in verbs.c */
	unsigned long		failed_marshal_count;	/* marshal failures */
	unsigned long		bad_reply_count;	/* unparsable replies */
};
/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;	/* generic transport; must allow container_of (see rpcx_to_rdmax) */
	struct rpcrdma_ia	rx_ia;		/* interface adapter */
	struct rpcrdma_ep	rx_ep;		/* RDMA endpoint */
	struct rpcrdma_buffer	rx_buf;		/* pre-registered buffer pools */
	struct rpcrdma_create_data_internal rx_data; /* mount-time parameters */
	struct delayed_work	rx_connect_worker; /* deferred connect handling */
	struct rpcrdma_stats	rx_stats;	/* transport statistics */
};
/* Recover the rpcrdma_xprt from its embedded generic rpc_xprt. */
#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
/* Shorthand for a transport's mount-time creation data. */
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

/* External (on-the-fly) memory registration of chunk segments. */
int rpcrdma_register_external(struct rpcrdma_mr_seg *,
				int, int, struct rpcrdma_xprt *);
int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
				struct rpcrdma_xprt *);

/* Allocate/free a registered buffer (header + data area; see
 * struct rpcrdma_regbuf). */
struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
					size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
			struct rpcrdma_regbuf *);

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
int rpcrdma_marshal_req(struct rpc_rqst *);
size_t rpcrdma_max_payload(struct rpcrdma_xprt *);

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;
/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
/* Workqueue created in svc_rdma.c */
extern struct workqueue_struct *svc_rdma_wq;

/* Server-side RDMA payload limit: the smaller of the generic RPC
 * service maximum and what the segment array can cover. */
#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT)
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
#else
#define RPCSVC_MAXPAYLOAD_RDMA (RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT)
#endif

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */