xsk_queue.h

/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

struct xsk_queue {
        struct xdp_umem_props umem_props;
        u32 ring_mask;
        u32 nentries;
        u32 prod_head;
        u32 prod_tail;
        u32 cons_head;
        u32 cons_tail;
        struct xdp_ring *ring;
        u64 invalid_descs;
};
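
/* The prod_head/prod_tail and cons_head/cons_tail members above are the
 * kernel's cached copies of the ring indices; the shared, user-visible
 * indices live in q->ring->producer and q->ring->consumer. All indices
 * are free-running u32 counters that are masked with ring_mask
 * (nentries - 1, which assumes a power-of-two nentries) only when a
 * slot is actually addressed, so the unsigned head/tail subtractions
 * below stay correct across wraparound.
 */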

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
        u32 entries = q->prod_tail - q->cons_tail;

        if (entries == 0) {
                /* Refresh the local pointer */
                q->prod_tail = READ_ONCE(q->ring->producer);
                entries = q->prod_tail - q->cons_tail;
        }

        return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
        u32 free_entries = q->nentries - (producer - q->cons_tail);

        if (free_entries >= dcnt)
                return free_entries;

        /* Refresh the local tail pointer */
        q->cons_tail = READ_ONCE(q->ring->consumer);
        return q->nentries - (producer - q->cons_tail);
}
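
/* Worked example of the arithmetic above (illustrative numbers, not
 * from the source): with nentries = 128, producer = 130 and
 * cons_tail = 10, free = 128 - (130 - 10) = 8 entries. Because the
 * indices are free-running u32s, the same subtraction also yields the
 * right count after producer wraps past U32_MAX.
 */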

/* UMEM queue */

static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
        if (unlikely(idx >= q->umem_props.nframes)) {
                q->invalid_descs++;
                return false;
        }
        return true;
}

static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                if (xskq_is_valid_id(q, ring->desc[idx]))
                        return &ring->desc[idx];

                q->cons_tail++;
        }

        return NULL;
}

static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
        struct xdp_umem_ring *ring;

        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();

                return xskq_validate_id(q);
        }

        ring = (struct xdp_umem_ring *)q->ring;
        return &ring->desc[q->cons_tail & q->ring_mask];
}
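
/* Minimal consumer sketch (illustrative, not part of this header).
 * xskq_peek_id() returns a pointer to the next valid id without
 * consuming it; xskq_discard_id() below advances past it. The
 * use_frame() callee is hypothetical:
 *
 *	u32 *id;
 *
 *	while ((id = xskq_peek_id(q))) {
 *		use_frame(*id);
 *		xskq_discard_id(q);
 *	}
 */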

static inline void xskq_discard_id(struct xsk_queue *q)
{
        q->cons_tail++;
        (void)xskq_validate_id(q);
}

static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        ring->desc[q->prod_tail++ & q->ring_mask] = id;

        /* Order producer and data */
        smp_wmb();

        WRITE_ONCE(q->ring->producer, q->prod_tail);
        return 0;
}

static inline int xskq_reserve_id(struct xsk_queue *q)
{
        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        q->prod_head++;
        return 0;
}
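
/* Producing an id is split in two: xskq_reserve_id() checks for space
 * and advances the private prod_head, while xskq_produce_id() writes
 * the entry and publishes it with smp_wmb() before updating the
 * producer index, pairing with the consumer's smp_rmb(). A caller can
 * therefore reserve a slot early and fill it later without exposing a
 * partially written entry to user space.
 */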

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
        u32 buff_len;

        if (unlikely(d->idx >= q->umem_props.nframes)) {
                q->invalid_descs++;
                return false;
        }

        buff_len = q->umem_props.frame_size;
        if (unlikely(d->len > buff_len || d->len == 0 ||
                     d->offset > buff_len || d->offset + d->len > buff_len)) {
                q->invalid_descs++;
                return false;
        }

        return true;
}
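
/* Example of a descriptor the check above rejects (illustrative
 * numbers): with frame_size = 2048, a descriptor with offset = 2000
 * and len = 100 passes the individual bounds checks but fails
 * offset + len > buff_len (2100 > 2048), so a descriptor can never
 * reach past the end of its umem frame.
 */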

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
                                                  struct xdp_desc *desc)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                if (xskq_is_valid_desc(q, &ring->desc[idx])) {
                        if (desc)
                                *desc = ring->desc[idx];
                        return desc;
                }

                q->cons_tail++;
        }

        return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
                                              struct xdp_desc *desc)
{
        struct xdp_rxtx_ring *ring;

        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();

                return xskq_validate_desc(q, desc);
        }

        ring = (struct xdp_rxtx_ring *)q->ring;
        *desc = ring->desc[q->cons_tail & q->ring_mask];
        return desc;
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
        q->cons_tail++;
        (void)xskq_validate_desc(q, NULL);
}
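
/* Minimal Tx-consumption sketch (illustrative, not part of this
 * header). Invalid descriptors are counted and skipped inside the
 * peek/validate path, so the caller only ever sees usable ones; the
 * transmit() callee is hypothetical:
 *
 *	struct xdp_desc d;
 *
 *	while (xskq_peek_desc(q, &d)) {
 *		transmit(d.idx, d.offset, d.len);
 *		xskq_discard_desc(q);
 *	}
 */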

static inline int xskq_produce_batch_desc(struct xsk_queue *q,
                                          u32 id, u32 len, u16 offset)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        unsigned int idx;

        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        idx = (q->prod_head++) & q->ring_mask;
        ring->desc[idx].idx = id;
        ring->desc[idx].len = len;
        ring->desc[idx].offset = offset;

        return 0;
}
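
/* xskq_produce_batch_desc() only advances the private prod_head; the
 * new entries become visible to user space in one step when
 * xskq_produce_flush_desc() below copies prod_head into prod_tail and
 * publishes it. Illustrative batching pattern (the id[], len[], off[]
 * arrays and n are hypothetical):
 *
 *	for (i = 0; i < n; i++)
 *		if (xskq_produce_batch_desc(q, id[i], len[i], off[i]))
 *			break;
 *	xskq_produce_flush_desc(q);
 */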

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
        /* Order producer and data */
        smp_wmb();

        q->prod_tail = q->prod_head;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
        return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
        return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */