@@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.4.0-k"
+#define DRV_VERSION "5.0.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2016 Intel Corporation.";
@@ -945,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }

-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
-				      struct ixgbe_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1198,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
				 DMA_TO_DEVICE);

 		/* clear tx_buffer data */
-		tx_buffer->skb = NULL;
 		dma_unmap_len_set(tx_buffer, len, 0);

 		/* unmap remaining buffers */
@@ -1552,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 	}
 }

+static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
+}
+
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 				    struct ixgbe_rx_buffer *bi)
 {
@@ -1570,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	}

 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbe_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 IXGBE_RX_DMA_ATTR);

 	/*
 	 * if mapping failed free memory back to system since
@@ -1586,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,

 	bi->dma = dma;
 	bi->page = page;
-	bi->page_offset = 0;
+	bi->page_offset = ixgbe_rx_offset(rx_ring);
+	bi->pagecnt_bias = 1;

 	return true;
 }
@@ -1601,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	u16 i = rx_ring->next_to_use;
+	u16 bufsz;

 	/* nothing to do */
 	if (!cleaned_count)
@@ -1610,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	bi = &rx_ring->rx_buffer_info[i];
 	i -= rx_ring->count;

+	bufsz = ixgbe_rx_bufsz(rx_ring);
+
 	do {
 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
 			break;

+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+
 		/*
 		 * Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
@@ -1629,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}

-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.upper.status_error = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;

 		cleaned_count--;
 	} while (cleaned_count);
@@ -1832,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 {
 	/* if the page was released unmap it, else just sync our portion */
 	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		IXGBE_CB(skb)->page_released = false;
+		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      IXGBE_CB(skb)->dma,
 					      frag->page_offset,
-					      ixgbe_rx_bufsz(rx_ring),
+					      skb_frag_size(frag),
 					      DMA_FROM_DEVICE);
 	}
-	IXGBE_CB(skb)->dma = 0;
 }

 /**
@@ -1880,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 	}

 	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
+	if (!skb_headlen(skb))
 		ixgbe_pull_tail(rx_ring, skb);

 #ifdef IXGBE_FCOE
@@ -1915,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 	nta++;
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

-	/* transfer page from old buffer to new buffer */
-	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 ixgbe_rx_bufsz(rx_ring),
-					 DMA_FROM_DEVICE);
+	/* Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls and unnecessary copy of skb.
+	 */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }

 static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -1930,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }

+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+	/* avoid re-using remote pages */
+	if (unlikely(ixgbe_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+		return false;
+#else
+	/* The last offset is a bit aggressive in that we assume the
+	 * worst case of FCoE being enabled and using a 3K buffer.
+	 * However this should have minimal impact as the 1K extra is
+	 * still less than one buffer in size.
+	 */
+#define IXGBE_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
+	if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
+		return false;
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(!pagecnt_bias)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
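The pagecnt_bias scheme above batches page reference counting: the driver hands a fragment to the stack by giving up one of the references it already holds (pagecnt_bias--) instead of calling page_ref_inc() per frame, and only refills references in bulk once the bias is drained. A minimal userspace sketch of that bookkeeping, with a plain int standing in for page_ref_count() and every name purely illustrative:

/* Userspace model of the pagecnt_bias accounting; not driver code. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_rx_buffer {
	int page_refcount;		/* stands in for page_ref_count(page) */
	unsigned int pagecnt_bias;	/* references still owned by the driver */
};

/* hand one fragment to the stack: give away one of the driver's refs */
static void demo_give_frag_to_stack(struct demo_rx_buffer *b)
{
	b->pagecnt_bias--;
}

/* the stack eventually drops the reference it was handed */
static void demo_stack_puts_page(struct demo_rx_buffer *b)
{
	b->page_refcount--;
}

/* mirrors the PAGE_SIZE < 8192 branch of ixgbe_can_reuse_rx_page() */
static bool demo_can_reuse(struct demo_rx_buffer *b)
{
	if (b->page_refcount - (int)b->pagecnt_bias > 1)
		return false;		/* someone else still holds the page */

	if (!b->pagecnt_bias) {		/* drained: restock references in bulk */
		b->page_refcount += USHRT_MAX;
		b->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

int main(void)
{
	/* freshly allocated page: refcount 1, bias 1 (as in alloc_mapped_page) */
	struct demo_rx_buffer b = { .page_refcount = 1, .pagecnt_bias = 1 };

	demo_give_frag_to_stack(&b);	/* bias 1 -> 0; stack now owns one ref */
	printf("reusable: %d (refcount %d, bias %u)\n",
	       demo_can_reuse(&b), b.page_refcount, b.pagecnt_bias);

	demo_stack_puts_page(&b);	/* stack frees its copy later */
	demo_give_frag_to_stack(&b);	/* next frame uses the other half */
	printf("reusable: %d (refcount %d, bias %u)\n",
	       demo_can_reuse(&b), b.page_refcount, b.pagecnt_bias);
	return 0;
}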
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1945,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
  * The function will then update the page offset if necessary and return
  * true if the buffer can be reused by the adapter.
  **/
-static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 			      struct ixgbe_rx_buffer *rx_buffer,
-			      union ixgbe_adv_rx_desc *rx_desc,
-			      struct sk_buff *skb)
+			      struct sk_buff *skb,
+			      unsigned int size)
 {
-	struct page *page = rx_buffer->page;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
-				   ixgbe_rx_bufsz(rx_ring);
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
 #endif
-
-	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
-		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-		/* page is not reserved, we can reuse buffer as-is */
-		if (likely(!ixgbe_page_is_reserved(page)))
-			return true;
-
-		/* this page cannot be reused so discard it */
-		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
-		return false;
-	}
-
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
 			rx_buffer->page_offset, size, truesize);
-
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbe_page_is_reserved(page)))
-		return false;
-
 #if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
-		return false;
-
-	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= truesize;
 #else
-	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
-
-	if (rx_buffer->page_offset > last_offset)
-		return false;
 #endif
-
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	page_ref_inc(page);
-
-	return true;
 }

-static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
-					     union ixgbe_adv_rx_desc *rx_desc)
+static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+						   union ixgbe_adv_rx_desc *rx_desc,
+						   struct sk_buff **skb,
+						   const unsigned int size)
 {
 	struct ixgbe_rx_buffer *rx_buffer;
-	struct sk_buff *skb;
-	struct page *page;

 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-	page = rx_buffer->page;
-	prefetchw(page);
+	prefetchw(rx_buffer->page);
+	*skb = rx_buffer->skb;

-	skb = rx_buffer->skb;
+	/* Delay unmapping of the first packet. It carries the header
+	 * information, HW may still access the header after the writeback.
+	 * Only unmap it when EOP is reached
+	 */
+	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
+		if (!*skb)
+			goto skip_sync;
+	} else {
+		if (*skb)
+			ixgbe_dma_sync_frag(rx_ring, *skb);
+	}

-	if (likely(!skb)) {
-		void *page_addr = page_address(page) +
-				  rx_buffer->page_offset;
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+skip_sync:
+	rx_buffer->pagecnt_bias--;

-		/* prefetch first cache line of first page */
-		prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-		prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+	return rx_buffer;
+}

-		/* allocate a skb to store the frags */
-		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
-				     IXGBE_RX_HDR_SIZE);
-		if (unlikely(!skb)) {
-			rx_ring->rx_stats.alloc_rx_buff_failed++;
-			return NULL;
+static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
+				struct ixgbe_rx_buffer *rx_buffer,
+				struct sk_buff *skb)
+{
+	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+			/* the page has been released from the ring */
+			IXGBE_CB(skb)->page_released = true;
+		} else {
+			/* we are not reusing the buffer so unmap it */
+			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+					     ixgbe_rx_pg_size(rx_ring),
+					     DMA_FROM_DEVICE,
+					     IXGBE_RX_DMA_ATTR);
 		}
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}

-		/*
-		 * we will be copying header into skb->data in
-		 * pskb_may_pull so it is in our interest to prefetch
-		 * it now to avoid a possible cache miss
-		 */
-		prefetchw(skb->data);
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}

-		/*
-		 * Delay unmapping of the first packet. It carries the
-		 * header information, HW may still access the header
-		 * after the writeback. Only unmap it when EOP is
-		 * reached
-		 */
-		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
-			goto dma_sync;
+static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
+					   struct ixgbe_rx_buffer *rx_buffer,
+					   union ixgbe_adv_rx_desc *rx_desc,
+					   unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+	struct sk_buff *skb;

-		IXGBE_CB(skb)->dma = rx_buffer->dma;
-	} else {
-		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
-			ixgbe_dma_sync_frag(rx_ring, skb);
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif

-dma_sync:
-		/* we are reusing so sync this buffer for CPU use */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_buffer->dma,
-					      rx_buffer->page_offset,
-					      ixgbe_rx_bufsz(rx_ring),
-					      DMA_FROM_DEVICE);
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;

-		rx_buffer->skb = NULL;
-	}
+	if (size > IXGBE_RX_HDR_SIZE) {
+		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+			IXGBE_CB(skb)->dma = rx_buffer->dma;

-	/* pull page into skb */
-	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
-		/* hand second half of page back to the ring */
-		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-		/* the page has been released from the ring */
-		IXGBE_CB(skb)->page_released = true;
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				rx_buffer->page_offset,
+				size, truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
 	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring),
-			       DMA_FROM_DEVICE);
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+		rx_buffer->pagecnt_bias++;
 	}

-	/* clear contents of buffer_info */
-	rx_buffer->page = NULL;
+	return skb;
+}
+
+static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
+				       struct ixgbe_rx_buffer *rx_buffer,
+				       union ixgbe_adv_rx_desc *rx_desc,
+				       unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, IXGBE_SKB_PAD);
+	__skb_put(skb, size);
+
+	/* record DMA address if this is the start of a chain of buffers */
+	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+		IXGBE_CB(skb)->dma = rx_buffer->dma;
+
+	/* update buffer offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif

 	return skb;
 }
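For order-0 pages (PAGE_SIZE < 8192) the paths above treat each Rx page as two fixed halves and flip between them with an XOR of truesize, so two frames can be in flight per page while ixgbe_can_reuse_rx_page() decides whether it may be recycled. A standalone sketch of just that offset arithmetic, assuming a 4K page and ignoring the optional IXGBE_SKB_PAD headroom:

/* Userspace illustration of the half-page flip; constants are assumptions. */
#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 4096;		/* assumed order-0 page */
	const unsigned int truesize = page_size / 2;	/* ixgbe_rx_pg_size() / 2 */
	unsigned int page_offset = 0;			/* 0 (or IXGBE_SKB_PAD) in the driver */

	/* each frame lands in the half the offset currently points at,
	 * then the offset is flipped to the other half for the next frame
	 */
	for (int frame = 0; frame < 4; frame++) {
		printf("frame %d uses bytes [%u, %u)\n",
		       frame, page_offset, page_offset + truesize);
		page_offset ^= truesize;	/* rx_buffer->page_offset ^= truesize */
	}
	return 0;
}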
@@ -2114,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,

 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
+		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
+		unsigned int size;

 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -2123,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}

 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
-		if (!rx_desc->wb.upper.status_error)
+		size = le16_to_cpu(rx_desc->wb.upper.length);
+		if (!size)
 			break;

 		/* This memory barrier is needed to keep us from reading
@@ -2133,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		dma_rmb();

+		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
 		/* retrieve a buffer from the ring */
-		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+		if (skb)
+			ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		else if (ring_uses_build_skb(rx_ring))
+			skb = ixgbe_build_skb(rx_ring, rx_buffer,
+					      rx_desc, size);
+		else
+			skb = ixgbe_construct_skb(rx_ring, rx_buffer,
+						  rx_desc, size);

 		/* exit if we failed to retrieve a buffer */
-		if (!skb)
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
 			break;
+		}

+		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
 		cleaned_count++;

 		/* place incomplete frames back on ring for completion */
@@ -3197,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,

 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

+	/* reinitialize tx_buffer_info */
+	memset(ring->tx_buffer_info, 0,
+	       sizeof(struct ixgbe_tx_buffer) * ring->count);
+
 	/* enable queue */
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

@@ -3367,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

 	/* configure the packet buffer length */
-	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+		srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

 	/* configure descriptor type */
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -3668,6 +3748,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	union ixgbe_adv_rx_desc *rx_desc;
 	u64 rdba = ring->dma;
 	u32 rxdctl;
 	u8 reg_idx = ring->reg_idx;
@@ -3700,8 +3781,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 		 */
 		rxdctl &= ~0x3FFFFF;
 		rxdctl |= 0x080420;
+#if (PAGE_SIZE < 8192)
+	} else {
+		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+			    IXGBE_RXDCTL_RLPML_EN);
+
+		/* Limit the maximum frame size so we don't overrun the skb */
+		if (ring_uses_build_skb(ring) &&
+		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+			rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+				  IXGBE_RXDCTL_RLPML_EN;
+#endif
 	}

+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct ixgbe_rx_buffer) * ring->count);
+
+	/* initialize Rx descriptor 0 */
+	rx_desc = IXGBE_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;
+
 	/* enable receive descriptor ring */
 	rxdctl |= IXGBE_RXDCTL_ENABLE;
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

@@ -3838,10 +3938,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 	 */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rx_ring = adapter->rx_ring[i];
+
+		clear_ring_rsc_enabled(rx_ring);
+		clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+		clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 			set_ring_rsc_enabled(rx_ring);
-		else
-			clear_ring_rsc_enabled(rx_ring);
+
+		if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+		clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+		if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+			continue;
+
+		set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
+			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+#endif
 	}
 }

@@ -4855,45 +4975,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
-	unsigned long size;
-	u16 i;
-
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
+	u16 i = rx_ring->next_to_clean;
+	struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

 	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rx_ring->count; i++) {
-		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
-
+	while (i != rx_ring->next_to_alloc) {
 		if (rx_buffer->skb) {
 			struct sk_buff *skb = rx_buffer->skb;
 			if (IXGBE_CB(skb)->page_released)
-				dma_unmap_page(dev,
-					       IXGBE_CB(skb)->dma,
-					       ixgbe_rx_bufsz(rx_ring),
-					       DMA_FROM_DEVICE);
+				dma_unmap_page_attrs(rx_ring->dev,
						     IXGBE_CB(skb)->dma,
						     ixgbe_rx_pg_size(rx_ring),
						     DMA_FROM_DEVICE,
						     IXGBE_RX_DMA_ATTR);
 			dev_kfree_skb(skb);
-			rx_buffer->skb = NULL;
 		}

-		if (!rx_buffer->page)
-			continue;
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);

-		dma_unmap_page(dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);

-		rx_buffer->page = NULL;
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
 	}

-	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
-
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
-
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
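ixgbe_clean_rx_ring() now walks only the buffers between next_to_clean and next_to_alloc instead of scrubbing the whole array, which is what allows the setup path below to drop vzalloc() in favour of vmalloc(). A tiny standalone sketch of that wrap-around walk, with made-up ring indices:

/* Userspace illustration of the circular-range walk; values are arbitrary. */
#include <stdio.h>

int main(void)
{
	const unsigned short count = 8;		/* ring->count */
	unsigned short next_to_clean = 6;	/* first in-flight buffer */
	unsigned short next_to_alloc = 3;	/* one past the last posted buffer */
	unsigned short i = next_to_clean;

	/* visit every in-flight buffer exactly once, wrapping at the end */
	while (i != next_to_alloc) {
		printf("release buffer %u\n", i);	/* prints 6 7 0 1 2 */
		i++;
		if (i == count)
			i = 0;
	}
	return 0;
}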
@@ -5362,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned long size;
-	u16 i;
+	u16 i = tx_ring->next_to_clean;
+	struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

-	/* ring already cleared, nothing to do */
-	if (!tx_ring->tx_buffer_info)
-		return;
+	while (i != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;

-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);

-	netdev_tx_reset_queue(txring_txq(tx_ring));
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);

-	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_buffer_info, 0, size);
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
+	}

-	/* Zero out the descriptor ring */
-	memset(tx_ring->desc, 0, tx_ring->size);
+	/* reset BQL for queue */
+	netdev_tx_reset_queue(txring_txq(tx_ring));

+	/* reset next_to_use and next_to_clean */
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 }
@@ -5829,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 	if (tx_ring->q_vector)
 		ring_node = tx_ring->q_vector->numa_node;

-	tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
+	tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
 	if (!tx_ring->tx_buffer_info)
-		tx_ring->tx_buffer_info = vzalloc(size);
+		tx_ring->tx_buffer_info = vmalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;

@@ -5913,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 	if (rx_ring->q_vector)
 		ring_node = rx_ring->q_vector->numa_node;

-	rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
+	rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
 	if (!rx_ring->rx_buffer_info)
-		rx_ring->rx_buffer_info = vzalloc(size);
+		rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info)
 		goto err;

@@ -7630,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	return;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_buffer = &tx_ring->tx_buffer_info[i];

 	/* clear dma mappings for failed tx_buffer_info map */
-	for (;;) {
+	while (tx_buffer != first) {
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
 	}

+	if (dma_unmap_len(tx_buffer, len))
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buffer, len, 0);
+
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+
 	tx_ring->next_to_use = i;
 }
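The reworked dma_error path unwinds the ring backwards from the failing buffer to first; the post-decrement test (i-- == 0) wraps the index to count - 1 only when it was already at 0, via the unsigned wrap followed by the += count. A quick standalone check of that index arithmetic, with an arbitrary ring size (the driver's index is a u16, which wraps the same way):

/* Userspace check of the backwards wrap; ring size and start index are made up. */
#include <stdio.h>

int main(void)
{
	const unsigned int count = 8;	/* tx_ring->count */
	unsigned int i = 2;		/* index of the buffer that failed to map */

	/* step backwards three times: 2 -> 1 -> 0 -> 7 */
	for (int step = 0; step < 3; step++) {
		if (i-- == 0)		/* post-decrement; wrap only if i was 0 */
			i += count;	/* unsigned wrap + count == count - 1 */
		printf("step %d -> index %u\n", step, i);
	}
	return 0;
}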
@@ -9687,7 +9852,7 @@ skip_sriov:

 #ifdef CONFIG_IXGBE_DCB
 	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
-		netdev->dcbnl_ops = &dcbnl_ops;
+		netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
 #endif

 #ifdef IXGBE_FCOE