@@ -308,7 +308,13 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
-static void xdp_return(void *data, struct xdp_mem_info *mem)
+/* XDP RX runs under NAPI protection, and in different delivery error
+ * scenarios (e.g. queue full), it is possible to return the xdp_frame
+ * while still leveraging this protection.  The @napi_direct boolean
+ * is used for those call sites, allowing for faster recycling of
+ * xdp_frames/pages in those cases.
+ */
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -320,7 +326,7 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 		page = virt_to_head_page(data);
 		if (xa)
-			page_pool_put_page(xa->page_pool, page);
+			page_pool_put_page(xa->page_pool, page, napi_direct);
 		else
 			put_page(page);
 		rcu_read_unlock();
@@ -340,12 +346,18 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	xdp_return(xdpf->data, &xdpf->mem);
+	__xdp_return(xdpf->data, &xdpf->mem, false);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
+{
+	__xdp_return(xdpf->data, &xdpf->mem, true);
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	xdp_return(xdp->data, &xdp->rxq->mem);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
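
To illustrate the call sites the new comment describes, here is a minimal, hypothetical driver-side sketch. It is not part of this patch: the example_tx_ring structure and the example_* helpers are made-up names used only to show where the two return variants would be called from. A queue-full delivery error hit while still running under NAPI protection returns the frame through the new xdp_return_frame_rx_napi(), while code outside NAPI keeps using xdp_return_frame():

#include <net/xdp.h>

/* Hypothetical example_tx_ring: stands in for a driver's TX ring state. */
struct example_tx_ring {
	unsigned int used;
	unsigned int size;
};

static bool example_tx_ring_full(const struct example_tx_ring *ring)
{
	return ring->used >= ring->size;
}

static int example_xdp_xmit_one(struct example_tx_ring *ring,
				struct xdp_frame *xdpf, bool in_napi)
{
	if (example_tx_ring_full(ring)) {
		/* Delivery error (queue full): return the frame.  While
		 * still under NAPI protection the frame can take the faster
		 * direct-recycle path; otherwise fall back to the regular
		 * xdp_return_frame().
		 */
		if (in_napi)
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		return -ENOSPC;
	}

	/* ... post xdpf to the hardware ring here ... */
	ring->used++;
	return 0;
}

The napi_direct flag that __xdp_return() forwards into page_pool_put_page() is what selects the faster path: per the comment above, a page returned while still inside NAPI can be recycled directly instead of going through the page_pool's slower shared return path.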