rdmavt_qp.h

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H
/*
 * Copyright(c) 2015 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};
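
/*
 * Because each rvt_swqe is followed by a variable number of rvt_sge
 * entries (qp->s_max_sge of them), send queue entries cannot be indexed
 * as a plain C array.  A minimal sketch of the required pointer
 * arithmetic is shown below; the helper name and its parameter list are
 * illustrative only (the in-tree drivers derive the stride from the QP
 * itself), not part of this header.
 */
static inline struct rvt_swqe *rvt_swqe_at(struct rvt_swqe *wq, u8 max_sge,
					   u32 n)
{
	/* Stride = fixed swqe header plus max_sge trailing SGEs. */
	size_t stride = sizeof(struct rvt_swqe) +
			max_sge * sizeof(struct rvt_sge);

	return (struct rvt_swqe *)((char *)wq + (size_t)n * stride);
}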
/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
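
/*
 * The get_rwqe_ptr() mentioned above is expected to look roughly like the
 * sketch below (mirroring the ipath/qib helpers this code was factored
 * from): each rvt_rwqe is followed by max_sge ib_sge entries, so the
 * offset of entry n must be computed from that stride rather than by
 * array indexing.  Treat the exact name and placement as an assumption
 * rather than part of this header.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}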
/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};
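
/*
 * A simplified sketch of the validation an mmap() handler performs with
 * these entries: the requested offset and length must match a previously
 * queued rvt_mmap_info belonging to the calling user context.  The helper
 * name is hypothetical, and the pending_mmaps list head plus its locking
 * are assumed to live in the driver's device structure, outside this
 * header.
 */
static inline struct rvt_mmap_info *
rvt_find_mmap_info(struct list_head *pending_mmaps,
		   struct ib_ucontext *context,
		   __u64 offset, unsigned long size)
{
	struct rvt_mmap_info *ip;

	list_for_each_entry(ip, pending_mmaps, pending_mmaps) {
		/* Match on owning context, byte offset, and mapping size. */
		if (ip->context == context && ip->offset == offset &&
		    size <= ip->size)
			return ip;
	}
	return NULL;
}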
#define RVT_MAX_RDMA_ATOMIC	16

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct rvt_sge rdma_sge;
		u64 atomic_data;
	};
};
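
/*
 * The responder keeps these entries in qp->s_ack_queue[] (declared below),
 * which is sized RVT_MAX_RDMA_ATOMIC + 1 so the head and tail indices can
 * distinguish a full ring from an empty one.  A sketch of the wrap-around
 * advance used when queueing a new response is shown below; the helper
 * name is an assumption, mirroring the hfi1/qib responder code this was
 * factored from.
 */
static inline u8 rvt_next_ack_entry(u8 n)
{
	/* Advance and wrap within the RVT_MAX_RDMA_ATOMIC + 1 entries. */
	if (++n > RVT_MAX_RDMA_ATOMIC)
		n = 0;
	return n;
}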
/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* link list for QPN hash table */
	struct rvt_swqe *s_wq;		/* send work queue */
	struct rvt_mmap_info *ip;
	unsigned long timeout_jiffies;	/* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	u32 remote_qpn;
	u32 pmtu;		/* decoded from path_mtu */
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 s_ahgpsn;		/* set to the psn in the copy of the header */

	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 timeout;		/* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */

	struct list_head rspwait;	/* link for waiting to respond */

	struct rvt_sge_state r_sge;	/* current receive data */
	struct rvt_rq r_rq;		/* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct rvt_sge_state *s_cur_sge;
	u32 s_flags;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;	/* current send request data */
	struct rvt_mregion *s_rdma_mr;
	struct sdma_engine *s_sde;	/* current sde */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0]	/* verified SGEs */
		____cacheline_aligned_in_smp;
};
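
/*
 * A minimal sketch of the lock ordering documented above for code paths
 * (such as a modify_qp implementation) that touch fields shared between
 * the requester and responder sides: take r_rq.lock first, then s_lock,
 * and release in the reverse order.  The function name is illustrative
 * only and not part of this header.
 */
static inline void rvt_lock_both_example(struct rvt_qp *qp)
{
	spin_lock_irq(&qp->r_rq.lock);	/* responder side first... */
	spin_lock(&qp->s_lock);		/* ...then requester side */

	/* ... update state shared by both sides here ... */

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_rq.lock);
}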
struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
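
/*
 * A sketch of how the limit field above is typically consumed when RWQEs
 * are pulled from an SRQ (mirroring the ipath/qib logic this code derives
 * from): once the count of posted RWQEs drops below the armed limit, the
 * limit is disarmed and an IB_EVENT_SRQ_LIMIT_REACHED asynchronous event
 * is raised.  The helper name and the way the remaining-RWQE count is
 * obtained are assumptions, not part of this header.
 */
static inline void rvt_srq_check_limit(struct rvt_srq *srq, u32 rwqes_left)
{
	struct ib_event ev;

	if (srq->limit && rwqes_left < srq->limit) {
		srq->limit = 0;	/* disarm until re-armed by modify_srq */
		ev.device = srq->ibsrq.device;
		ev.element.srq = &srq->ibsrq;
		ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
		srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	}
}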

#endif          /* DEF_RDMAVT_INCQP_H */