/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef SMC_CDC_H
#define SMC_CDC_H

#include <linux/kernel.h> /* max_t */
#include <linux/atomic.h>
#include <linux/in.h>
#include <linux/compiler.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_wr.h"

#define SMC_CDC_MSG_TYPE        0xFE

/* in network byte order */
union smc_cdc_cursor {          /* SMC cursor */
        struct {
                __be16  reserved;
                __be16  wrap;
                __be32  count;
        };
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t      acurs;  /* for atomic processing */
#else
        u64             acurs;  /* for atomic processing */
#endif
} __aligned(8);

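/*
 * The anonymous struct and acurs overlay the same 8 bytes, so a complete
 * cursor (wrap + count) can be read or written in a single atomic
 * operation; see the smc_curs_read*()/smc_curs_write*() helpers below.
 */
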
/* in network byte order */
struct smc_cdc_msg {
        struct smc_wr_rx_hdr            common; /* .type = 0xFE */
        u8                              len;    /* 44 */
        __be16                          seqno;
        __be32                          token;
        union smc_cdc_cursor            prod;
        union smc_cdc_cursor            cons;   /* piggy backed "ack" */
        struct smc_cdc_producer_flags   prod_flags;
        struct smc_cdc_conn_state_flags conn_state_flags;
        u8                              reserved[18];
} __aligned(8);

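/*
 * The length annotated above is fixed: assuming struct smc_wr_rx_hdr is
 * just the one-byte type (as the .type comment suggests), the message is
 * 1 + 1 + 2 + 4 + 8 + 8 + 1 + 1 + 18 = 44 bytes, which is the value the
 * len field carries.
 */

/*
 * The CDC flags most recently received from the peer are tracked in
 * conn->local_rx_ctrl; the helpers below test them for close-related
 * events.
 */
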
static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
        return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
               conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;
}

static inline bool smc_cdc_rxed_any_close_or_senddone(
        struct smc_connection *conn)
{
        return smc_cdc_rxed_any_close(conn) ||
               conn->local_rx_ctrl.conn_state_flags.peer_done_writing;
}

static inline void smc_curs_add(int size, union smc_host_cursor *curs,
                                int value)
{
        curs->count += value;
        if (curs->count >= size) {
                curs->wrap++;
                curs->count -= size;
        }
}

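/*
 * Worked example: with size = 100 and curs = {wrap = 3, count = 98},
 * smc_curs_add(100, &curs, 5) gives count = 103 >= 100, so the cursor
 * becomes {wrap = 4, count = 3}.  Note that the single subtraction
 * assumes value < size, i.e. one call never wraps more than once.
 */
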
/* SMC cursors are 8 bytes long and require atomic reading and writing */
static inline u64 smc_curs_read(union smc_host_cursor *curs,
                                struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;
        u64 ret;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        ret = curs->acurs;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
        return ret;
#else
        return atomic64_read(&curs->acurs);
#endif
}

static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs,
                                    struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;
        u64 ret;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        ret = curs->acurs;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
        return ret;
#else
        return atomic64_read(&curs->acurs);
#endif
}

static inline void smc_curs_write(union smc_host_cursor *curs, u64 val,
                                  struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        curs->acurs = val;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
        atomic64_set(&curs->acurs, val);
#endif
}

static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val,
                                      struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        curs->acurs = val;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
        atomic64_set(&curs->acurs, val);
#endif
}

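/*
 * Cursor updates follow a read-modify-write pattern built from these
 * helpers: snapshot the cursor atomically, modify the private copy, then
 * store it back atomically.  An illustrative sketch (field names such as
 * local_tx_ctrl and rmbe_size are taken from the surrounding SMC code,
 * not from this header):
 *
 *      union smc_host_cursor cons;
 *
 *      smc_curs_write(&cons,
 *                     smc_curs_read(&conn->local_tx_ctrl.cons, conn),
 *                     conn);
 *      smc_curs_add(conn->rmbe_size, &cons, len);
 *      smc_curs_write(&conn->local_tx_ctrl.cons,
 *                     smc_curs_read(&cons, conn), conn);
 */
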
/* calculate cursor difference between old and new, where old <= new */
static inline int smc_curs_diff(unsigned int size,
                                union smc_host_cursor *old,
                                union smc_host_cursor *new)
{
        if (old->wrap != new->wrap)
                return max_t(int, 0,
                             ((size - old->count) + new->count));

        return max_t(int, 0, (new->count - old->count));
}

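/*
 * Worked example: with size = 16, old = {wrap = 5, count = 10} and
 * new = {wrap = 6, count = 4}, the wraps differ and the difference is
 * (16 - 10) + 4 = 10 bytes; with equal wraps it is simply
 * new->count - old->count.  The max_t() calls clamp the result at 0,
 * and the formula implies old and new are at most one wrap apart.
 */
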
static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
                                          union smc_host_cursor *local,
                                          struct smc_connection *conn)
{
        union smc_host_cursor temp;

        smc_curs_write(&temp, smc_curs_read(local, conn), conn);
        peer->count = htonl(temp.count);
        peer->wrap = htons(temp.wrap);
        /* peer->reserved = htons(0); must be ensured by caller */
}

static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
                                       struct smc_host_cdc_msg *local,
                                       struct smc_connection *conn)
{
        peer->common.type = local->common.type;
        peer->len = local->len;
        peer->seqno = htons(local->seqno);
        peer->token = htonl(local->token);
        smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn);
        smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn);
        peer->prod_flags = local->prod_flags;
        peer->conn_state_flags = local->conn_state_flags;
}

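/*
 * Take over a peer cursor only if it moves forward: a received cursor
 * that lies behind the locally known one (smaller wrap, or equal wrap
 * with a smaller count) stems from an out-of-date CDC message and is
 * ignored.  The "&& temp.wrap" test lets a peer wrap value of 0 pass,
 * so the 16-bit wrap counter itself may roll over.
 */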
static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
                                          union smc_cdc_cursor *peer,
                                          struct smc_connection *conn)
{
        union smc_host_cursor temp, old;
        union smc_cdc_cursor net;

        smc_curs_write(&old, smc_curs_read(local, conn), conn);
        smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn);
        temp.count = ntohl(net.count);
        temp.wrap = ntohs(net.wrap);
        if ((old.wrap > temp.wrap) && temp.wrap)
                return;
        if ((old.wrap == temp.wrap) &&
            (old.count > temp.count))
                return;
        smc_curs_write(local, smc_curs_read(&temp, conn), conn);
}

static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                                       struct smc_cdc_msg *peer,
                                       struct smc_connection *conn)
{
        local->common.type = peer->common.type;
        local->len = peer->len;
        local->seqno = ntohs(peer->seqno);
        local->token = ntohl(peer->token);
        smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
        smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
        local->prod_flags = peer->prod_flags;
        local->conn_state_flags = peer->conn_state_flags;
}

struct smc_cdc_tx_pend;

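/*
 * Sending a CDC message is a two-step operation: reserve a send buffer
 * plus pending slot with smc_cdc_get_free_slot(), then post it with
 * smc_cdc_msg_send(); smc_cdc_get_slot_and_msg_send() presumably wraps
 * both steps.  A minimal sketch of the split form (error handling and
 * link selection left to the caller):
 *
 *      struct smc_cdc_tx_pend *pend;
 *      struct smc_wr_buf *wr_buf;
 *      int rc;
 *
 *      rc = smc_cdc_get_free_slot(link, &wr_buf, &pend);
 *      if (!rc)
 *              rc = smc_cdc_msg_send(conn, wr_buf, pend);
 */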
int smc_cdc_get_free_slot(struct smc_link *link, struct smc_wr_buf **wr_buf,
                          struct smc_cdc_tx_pend **pend);
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
                     struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
bool smc_cdc_tx_has_pending(struct smc_connection *conn);
int smc_cdc_init(void) __init;

#endif /* SMC_CDC_H */