xsk_queue.c

// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}
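
/* Fill and completion rings carry 64-bit UMEM addresses, while Rx/Tx rings
 * carry full xdp_desc entries; both sizes include the ring header.
 */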
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}

static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}
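
/* Allocate a queue and its backing ring. nentries must be a power of two so
 * that nentries - 1 can be used as the ring index mask.
 */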
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}
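
/* Allocate a reuse queue for leftover fill-ring handles. The requested size
 * is rounded up to the next power of two; only the fixed header is zeroed,
 * and 'length' (starting at zero) tracks how many handles are valid.
 */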
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Check for overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
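
/* Install newq as the UMEM's reuse queue, carrying over any handles queued
 * in the old one. Returns the queue the caller should free: NULL if there
 * was no old queue, newq if it is too small for the old entries, else oldq.
 */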
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
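
/* Counterpart to xsk_reuseq_prepare(): release a reuse queue allocated with
 * kvmalloc().
 */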
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);
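
/* Drop the UMEM's reuse queue on teardown and clear the pointer to avoid a
 * double free.
 */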
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}