xsk_queue.h

/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

#define RX_BATCH_SIZE 16
#define LAZY_UPDATE_THRESHOLD 128

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[0] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u64 chunk_mask;
	u64 size;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};
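
/* The shared producer/consumer indices in struct xdp_ring are only touched
 * with READ_ONCE()/WRITE_ONCE() around the barriers below. prod_head,
 * prod_tail, cons_head and cons_tail are local shadow copies: prod_head
 * counts entries reserved or written but not yet published, prod_tail the
 * entries already made visible via ring->producer; cons_tail is the next
 * entry to consume, cons_head the cached limit last observed through
 * xskq_nb_avail(). The split lets the helpers batch work and avoid touching
 * the shared cache lines on every entry.
 */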

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}
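
/* Both helpers rely on unsigned wraparound of the free-running u32 indices.
 * For example, with nentries == 8, a producer index that has wrapped to 2
 * and cons_tail == 0xfffffffe give producer - cons_tail == 4, so
 * xskq_nb_free() reports 8 - 4 == 4 free slots even though the raw counters
 * have overflowed.
 */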

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
		if (xskq_is_valid_addr(q, *addr))
			return addr;

		q->cons_tail++;
	}

	return NULL;
}

static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}
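
/* A possible consumer-side pattern for a fill or completion queue; "fq" and
 * xsk_handle_addr() below are stand-ins for the caller's queue and per-buffer
 * work, not something this header provides:
 *
 *	u64 addr;
 *
 *	while (xskq_peek_addr(fq, &addr)) {
 *		xsk_handle_addr(addr);
 *		xskq_discard_addr(fq);
 *	}
 *
 * xskq_peek_addr() publishes the consumer index and refreshes the local
 * cons_head at most RX_BATCH_SIZE entries at a time; xskq_discard_addr()
 * only bumps the local tail, so the loop re-reads shared state infrequently.
 */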

static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_tail, 1) == 0)
		return -ENOSPC;

	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
		return -ENOSPC;

	ring->desc[q->prod_head++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
					     u32 nb_entries)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail += nb_entries;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}
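
/* xskq_produce_addr_lazy() only advances the local prod_head, and by passing
 * LAZY_UPDATE_THRESHOLD to xskq_nb_free() it re-reads the shared consumer
 * index only when the cached view drops below that many free slots. A
 * producer can therefore queue a batch and publish it once; "cq", "addrs"
 * and "n" below are assumed to come from the caller:
 *
 *	u32 queued = 0;
 *
 *	while (queued < n && !xskq_produce_addr_lazy(cq, addrs[queued]))
 *		queued++;
 *	if (queued)
 *		xskq_produce_flush_addr_n(cq, queued);
 */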

static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->chunk_mask) !=
	    (d->addr & q->chunk_mask)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}
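
/* Consuming Tx descriptors mirrors the address helpers above; xsk_xmit_one()
 * is a placeholder for building and sending one frame, not part of this
 * header:
 *
 *	struct xdp_desc desc;
 *
 *	while (xskq_peek_desc(tx, &desc)) {
 *		if (xsk_xmit_one(desc.addr, desc.len))
 *			break;
 *		xskq_discard_desc(tx);
 *	}
 *
 * Descriptors with a bad address, or with a length that crosses a chunk
 * boundary, are skipped inside xskq_validate_desc() and accounted in
 * invalid_descs.
 */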

static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}
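
/* On the Rx ring the kernel side produces descriptors for user space and
 * publishes them in one go; "rx", "addr" and "len" are assumed to come from
 * the caller:
 *
 *	if (!xskq_produce_batch_desc(rx, addr, len))
 *		xskq_produce_flush_desc(rx);
 *
 * Several xskq_produce_batch_desc() calls may precede a single flush; until
 * xskq_produce_flush_desc() runs, ring->producer is unchanged and user space
 * cannot see the new entries.
 */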

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

/* Executed by the core when the entire UMEM gets freed */
void xsk_reuseq_destroy(struct xdp_umem *umem);

#endif /* _LINUX_XSK_QUEUE_H */