@@ -2133,6 +2133,21 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
 #if L1_CACHE_BYTES < 128
 	prefetch(xdp->data + L1_CACHE_BYTES);
 #endif
+	/* Note, we get here by enabling legacy-rx via:
+	 *
+	 *    ethtool --set-priv-flags <dev> legacy-rx on
+	 *
+	 * In this mode, we currently get 0 extra XDP headroom as
+	 * opposed to having legacy-rx off, where we process XDP
+	 * packets going to stack via ixgbe_build_skb(). The latter
+	 * provides us currently with 192 bytes of headroom.
+	 *
+	 * For ixgbe_construct_skb() mode it means that the
+	 * xdp->data_meta will always point to xdp->data, since
+	 * the helper cannot expand the head. Should this ever
+	 * change in future for legacy-rx mode on, then let's also
+	 * add xdp->data_meta handling here.
+	 */
 
 	/* allocate a skb to store the frags */
 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
@@ -2165,6 +2180,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 				       struct xdp_buff *xdp,
 				       union ixgbe_adv_rx_desc *rx_desc)
 {
+	unsigned int metasize = xdp->data - xdp->data_meta;
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
@@ -2174,10 +2190,14 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 #endif
 	struct sk_buff *skb;
 
-	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
+	/* Prefetch first cache line of first page. If xdp->data_meta
+	 * is unused, this points exactly to xdp->data, otherwise we
+	 * likely have a consumer accessing first few bytes of meta
+	 * data, and then actual data.
+	 */
+	prefetch(xdp->data_meta);
 #if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
+	prefetch(xdp->data_meta + L1_CACHE_BYTES);
 #endif
 
 	/* build an skb to around the page buffer */
@@ -2188,6 +2208,8 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	/* update pointers within the skb to store the data */
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	__skb_put(skb, xdp->data_end - xdp->data);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
 
 	/* record DMA address if this is the start of a chain of buffers */
 	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
@@ -2326,7 +2348,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (!skb) {
 			xdp.data = page_address(rx_buffer->page) +
 				   rx_buffer->page_offset;
-			xdp_set_data_meta_invalid(&xdp);
+			xdp.data_meta = xdp.data;
 			xdp.data_hard_start = xdp.data -
 					      ixgbe_rx_offset(rx_ring);
 			xdp.data_end = xdp.data + size;
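
Not part of the patch above, but to illustrate what the new skb_metadata_set()
path carries: a minimal sketch of an XDP program that reserves a metadata area
with bpf_xdp_adjust_meta() and stores a 32-bit value in it. The program name,
the 0x42 mark and the bpf_helpers.h include path are illustrative assumptions,
not something defined by this patch; it assumes the usual clang BPF target
build.

/* Reserve 4 bytes of metadata in front of the packet data. On ixgbe with
 * legacy-rx off, the driver now keeps this area when the packet goes to
 * the stack (skb_metadata_set() above), so e.g. a tc/cls_bpf program can
 * read it back via __sk_buff->data_meta.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
	void *data;
	__u32 *meta;

	/* Grow the metadata area by 4 bytes; this fails when the driver
	 * provides no headroom, e.g. ixgbe with legacy-rx on as noted in
	 * the ixgbe_construct_skb() comment above.
	 */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if ((void *)(meta + 1) > data)	/* bounds check for the verifier */
		return XDP_PASS;

	*meta = 0x42;	/* example mark consumed later in the stack */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

The prefetch change in ixgbe_build_skb() matches this access pattern: when a
program like the sketch above is in use, the consumer touches the metadata
bytes immediately before the packet data, so prefetching xdp->data_meta covers
the first bytes actually read.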