@@ -3947,10 +3947,21 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		if (!buffer_info->page)
 			continue;
 
-		dma_unmap_page(rx_ring->dev,
-			       buffer_info->dma,
-			       PAGE_SIZE,
-			       DMA_FROM_DEVICE);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      buffer_info->dma,
+					      buffer_info->page_offset,
+					      IGB_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     buffer_info->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
 		__free_page(buffer_info->page);
 
 		buffer_info->page = NULL;
@@ -6812,12 +6823,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 
 	/* transfer page from old buffer to new buffer */
 	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
-					 old_buff->page_offset,
-					 IGB_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool igb_page_is_reserved(struct page *page)
@@ -6938,6 +6943,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -6962,21 +6974,15 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
 	if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
 		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
 	}
 
 	/* clear contents of rx_buffer */
@@ -7234,7 +7240,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -7275,6 +7282,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		if (!igb_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IGB_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
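
Taken together, these hunks move the Rx path to a map-once / sync-per-use model: the page is mapped with DMA_ATTR_SKIP_CPU_SYNC, only the IGB_RX_BUFSZ region handed to hardware is synced for the device, only the bytes the NIC actually wrote are synced back for the CPU, and the final unmap also skips the CPU sync so it cannot invalidate data the CPU still owns. The following is a minimal sketch of that buffer lifecycle, not igb code: struct rx_buf, RX_BUFSZ and the rx_buf_* helpers are hypothetical stand-ins for the driver's own types, while the dma_* calls are the real DMA API used in the patch.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define RX_BUFSZ 2048	/* assumption: half-page buffers, like IGB_RX_BUFSZ */

struct rx_buf {			/* hypothetical, mirrors igb's buffer_info */
	struct page *page;
	dma_addr_t dma;
	unsigned int page_offset;
};

/* Map once for the lifetime of the page; DMA_ATTR_SKIP_CPU_SYNC avoids the
 * implicit sync of the whole page, so the CPU-owned half is left untouched.
 */
static int rx_buf_map(struct device *dev, struct rx_buf *buf)
{
	buf->page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!buf->page)
		return -ENOMEM;

	buf->dma = dma_map_page_attrs(dev, buf->page, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, buf->dma)) {
		__free_page(buf->page);
		return -ENOMEM;
	}
	buf->page_offset = 0;
	return 0;
}

/* Before posting the buffer to the NIC: hand only the region the hardware
 * will DMA into back to the device.
 */
static void rx_buf_give_to_hw(struct device *dev, struct rx_buf *buf)
{
	dma_sync_single_range_for_device(dev, buf->dma, buf->page_offset,
					 RX_BUFSZ, DMA_FROM_DEVICE);
}

/* After a completed receive: invalidate only the bytes the NIC wrote so the
 * CPU sees fresh data, leaving the rest of the page alone.
 */
static void rx_buf_take_from_hw(struct device *dev, struct rx_buf *buf,
				unsigned int size)
{
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->page_offset,
				      size, DMA_FROM_DEVICE);
}

/* Teardown: sync for the CPU first (the device may have written here), then
 * unmap without another sync and free the page.
 */
static void rx_buf_unmap(struct device *dev, struct rx_buf *buf)
{
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->page_offset,
				      RX_BUFSZ, DMA_FROM_DEVICE);
	dma_unmap_page_attrs(dev, buf->dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
	__free_page(buf->page);
	buf->page = NULL;
}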