@@ -1570,8 +1570,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbe_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 IXGBE_RX_DMA_ATTR);
 
 	/*
 	 * if mapping failed free memory back to system since
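Note: IXGBE_RX_DMA_ATTR itself is not defined in any hunk shown here; it comes from the driver header. A plausible definition, assuming the attribute set such a driver would want (skip the implicit CPU sync on map/unmap, since the Rx path now syncs explicitly, and allow a weakly ordered mapping):

#include <linux/dma-mapping.h>

/* assumed for illustration; the real definition lives in ixgbe.h */
#define IXGBE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)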
@@ -1614,6 +1616,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 ixgbe_rx_bufsz(rx_ring),
+						 DMA_FROM_DEVICE);
+
 		/*
 		 * Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
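The dma_sync_single_range_for_device() added above is the counterpart of the one removed from ixgbe_reuse_rx_page() two hunks below: with DMA_ATTR_SKIP_CPU_SYNC, the map call no longer performs the CPU sync, so ownership of the buffer has to be handed to the device explicitly before the descriptor is posted. A minimal sketch of that map-then-sync pattern using only the generic DMA API (the helper name, sizes, and error convention are illustrative, not taken from the driver):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: map a page without the implicit CPU sync, then
 * explicitly sync just the window the device is allowed to DMA into.
 */
static dma_addr_t map_rx_page(struct device *dev, struct page *page,
			      unsigned int offset, unsigned int bufsz)
{
	dma_addr_t dma;

	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return DMA_MAPPING_ERROR;

	/* device now owns [offset, offset + bufsz) */
	dma_sync_single_range_for_device(dev, dma, offset, bufsz,
					 DMA_FROM_DEVICE);
	return dma;
}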
@@ -1832,8 +1840,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 {
 	/* if the page was released unmap it, else just sync our portion */
 	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
 		IXGBE_CB(skb)->page_released = false;
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -1917,12 +1927,6 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 
 	/* transfer page from old buffer to new buffer */
 	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 ixgbe_rx_bufsz(rx_ring),
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -2089,9 +2093,10 @@ dma_sync:
 		IXGBE_CB(skb)->page_released = true;
 	} else {
 		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring),
-			       DMA_FROM_DEVICE);
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
 	}
 
 	/* clear contents of buffer_info */
@@ -4883,10 +4888,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 		if (rx_buffer->skb) {
 			struct sk_buff *skb = rx_buffer->skb;
 			if (IXGBE_CB(skb)->page_released)
-				dma_unmap_page(dev,
-					       IXGBE_CB(skb)->dma,
-					       ixgbe_rx_bufsz(rx_ring),
-					       DMA_FROM_DEVICE);
+				dma_unmap_page_attrs(dev,
+						     IXGBE_CB(skb)->dma,
+						     ixgbe_rx_pg_size(rx_ring),
+						     DMA_FROM_DEVICE,
+						     IXGBE_RX_DMA_ATTR);
 			dev_kfree_skb(skb);
 			rx_buffer->skb = NULL;
 		}
@@ -4894,8 +4900,20 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 		if (!rx_buffer->page)
 			continue;
 
-		dma_unmap_page(dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(dev, rx_buffer->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
 		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
 
 		rx_buffer->page = NULL;
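Because DMA_ATTR_SKIP_CPU_SYNC also suppresses the cache maintenance that dma_unmap_page() would otherwise perform, the cleanup path above now calls dma_sync_single_range_for_cpu() itself before tearing down the mapping and freeing the page. A hedged sketch of that teardown pattern, mirroring the map-side helper sketched earlier (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: reclaim a page mapped with DMA_ATTR_SKIP_CPU_SYNC.
 * The CPU must take back ownership of the region the device may have
 * written before the mapping goes away and the page is reused.
 */
static void unmap_rx_page(struct device *dev, struct page *page,
			  dma_addr_t dma, unsigned int offset,
			  unsigned int bufsz)
{
	/* invalidate any cache lines the device may have dirtied */
	dma_sync_single_range_for_cpu(dev, dma, offset, bufsz,
				      DMA_FROM_DEVICE);

	/* no CPU sync happens inside the unmap with this attribute */
	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);

	__free_pages(page, 0);
}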