@@ -21,6 +21,12 @@ struct xdp_umem_page {
 	dma_addr_t dma;
 };
 
+struct xdp_umem_fq_reuse {
+	u32 nentries;
+	u32 length;
+	u64 handles[];
+};
+
 struct xdp_umem {
 	struct xsk_queue *fq;
 	struct xsk_queue *cq;
@@ -37,6 +43,7 @@ struct xdp_umem {
 	struct page **pgs;
 	u32 npgs;
 	struct net_device *dev;
+	struct xdp_umem_fq_reuse *fq_reuse;
 	u16 queue_id;
 	bool zc;
 	spinlock_t xsk_list_lock;
@@ -75,6 +82,10 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq);
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
 
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 {
@@ -85,6 +96,35 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 {
 	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
 }
+
+/* Reuse-queue aware version of FILL queue helpers */
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		return xsk_umem_peek_addr(umem, addr);
+
+	*addr = rq->handles[rq->length - 1];
+	return addr;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		xsk_umem_discard_addr(umem);
+	else
+		rq->length--;
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	rq->handles[rq->length++] = addr;
+}
 #else
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
@@ -128,6 +168,21 @@ static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 {
 }
 
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	return NULL;
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
+	struct xdp_umem *umem,
+	struct xdp_umem_fq_reuse *newq)
+{
+	return NULL;
+}
+static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+}
+
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 {
 	return NULL;
@@ -137,6 +192,20 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 {
 	return 0;
 }
+
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	return NULL;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+}
+
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */