@@ -117,7 +117,7 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
 
 static void xgbe_free_ring(struct xgbe_prv_data *pdata,
                            struct xgbe_ring *ring)
@@ -131,13 +131,24 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
         if (ring->rdata) {
                 for (i = 0; i < ring->rdesc_count; i++) {
                         rdata = XGBE_GET_DESC_DATA(ring, i);
-                        xgbe_unmap_skb(pdata, rdata);
+                        xgbe_unmap_rdata(pdata, rdata);
                 }
 
                 kfree(ring->rdata);
                 ring->rdata = NULL;
         }
 
+        if (ring->rx_pa.pages) {
+                dma_unmap_page(pdata->dev, ring->rx_pa.pages_dma,
+                               ring->rx_pa.pages_len, DMA_FROM_DEVICE);
+                put_page(ring->rx_pa.pages);
+
+                ring->rx_pa.pages = NULL;
+                ring->rx_pa.pages_len = 0;
+                ring->rx_pa.pages_offset = 0;
+                ring->rx_pa.pages_dma = 0;
+        }
+
         if (ring->rdesc) {
                 dma_free_coherent(pdata->dev,
                                   (sizeof(struct xgbe_ring_desc) *
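For context on the balance being kept here: the block above undoes exactly what the allocation side sets up, one dma_unmap_page() against the dma_map_page() performed when the page block was created, and one put_page() against the base reference returned by alloc_pages(). A minimal sketch of that pairing as a standalone helper (the name xgbe_free_rx_pa is hypothetical and not part of this patch; it assumes the page-tracking structure, here called struct xgbe_page_alloc, that backs ring->rx_pa):

/* Hypothetical helper (illustration only): release a cached Rx page
 * allocation. dma_unmap_page() balances the dma_map_page() done when
 * the block was allocated, put_page() drops the base reference that
 * alloc_pages() returned, and the tracking struct is then cleared.
 */
static void xgbe_free_rx_pa(struct device *dev, struct xgbe_page_alloc *pa)
{
        if (!pa->pages)
                return;

        dma_unmap_page(dev, pa->pages_dma, pa->pages_len, DMA_FROM_DEVICE);
        put_page(pa->pages);

        memset(pa, 0, sizeof(*pa));
}

The open-coded version in xgbe_free_ring() above does the same thing field by field.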
@@ -233,6 +244,65 @@ err_ring:
         return ret;
 }
 
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+                              struct xgbe_ring *ring,
+                              struct xgbe_ring_data *rdata)
+{
+        if (!ring->rx_pa.pages) {
+                struct page *pages = NULL;
+                dma_addr_t pages_dma;
+                gfp_t gfp;
+                int order, ret;
+
+                /* Try to obtain pages, decreasing order if necessary */
+                gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP;
+                order = max_t(int, PAGE_ALLOC_COSTLY_ORDER, 1);
+                while (--order >= 0) {
+                        pages = alloc_pages(gfp, order);
+                        if (pages)
+                                break;
+                }
+                if (!pages)
+                        return -ENOMEM;
+
+                /* Map the pages */
+                pages_dma = dma_map_page(pdata->dev, pages, 0,
+                                         PAGE_SIZE << order, DMA_FROM_DEVICE);
+                ret = dma_mapping_error(pdata->dev, pages_dma);
+                if (ret) {
+                        put_page(pages);
+                        return ret;
+                }
+
+                /* Set the values for this ring */
+                ring->rx_pa.pages = pages;
+                ring->rx_pa.pages_len = PAGE_SIZE << order;
+                ring->rx_pa.pages_offset = 0;
+                ring->rx_pa.pages_dma = pages_dma;
+        }
+
+        get_page(ring->rx_pa.pages);
+        rdata->rx_pa = ring->rx_pa;
+
+        rdata->rx_dma = ring->rx_pa.pages_dma + ring->rx_pa.pages_offset;
+        rdata->rx_dma_len = pdata->rx_buf_size;
+
+        ring->rx_pa.pages_offset += pdata->rx_buf_size;
+        if ((ring->rx_pa.pages_offset + pdata->rx_buf_size) >
+            ring->rx_pa.pages_len) {
+                /* This data descriptor is responsible for unmapping page(s) */
+                rdata->rx_unmap = ring->rx_pa;
+
+                /* Get a new allocation next time */
+                ring->rx_pa.pages = NULL;
+                ring->rx_pa.pages_len = 0;
+                ring->rx_pa.pages_offset = 0;
+                ring->rx_pa.pages_dma = 0;
+        }
+
+        return 0;
+}
+
 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
         struct xgbe_hw_if *hw_if = &pdata->hw_if;
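A note on the sizing above: because while (--order >= 0) decrements before the first alloc_pages() call, the first attempt is one order below max_t(int, PAGE_ALLOC_COSTLY_ORDER, 1); with 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER == 3 that means a 16 KiB block first, falling back to 8 KiB and then 4 KiB under memory pressure. Each descriptor then takes a pdata->rx_buf_size slice plus a page reference, and the descriptor that receives the last slice inherits the unmap duty through rx_unmap. A small user-space model of that accounting (illustration only; the 16 KiB block and 2 KiB buffer size are assumed example values, not driver constants):

#include <assert.h>
#include <stddef.h>

/* Models the slice/refcount bookkeeping of xgbe_map_rx_buffer(). */
struct page_alloc_model {
        size_t len;      /* total block size, i.e. PAGE_SIZE << order */
        size_t offset;   /* next unused byte within the block */
        int refcount;    /* base reference plus one per outstanding slice */
};

/* Hand out one buf_size slice; *owns_unmap is set for the caller that
 * receives the final slice, mirroring the rx_unmap handoff above.
 */
static void take_slice(struct page_alloc_model *pa, size_t buf_size,
                       int *owns_unmap)
{
        pa->refcount++;                 /* models get_page() per slice */
        pa->offset += buf_size;
        *owns_unmap = (pa->offset + buf_size > pa->len);
}

int main(void)
{
        struct page_alloc_model pa = { .len = 16384, .offset = 0,
                                       .refcount = 1 };
        int owns = 0, slices = 0;

        do {
                take_slice(&pa, 2048, &owns);
                slices++;
        } while (!owns);

        assert(slices == 8);            /* 16384 / 2048 buffers per block */
        return 0;
}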
@@ -281,8 +351,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
         struct xgbe_ring *ring;
         struct xgbe_ring_desc *rdesc;
         struct xgbe_ring_data *rdata;
-        dma_addr_t rdesc_dma, skb_dma;
-        struct sk_buff *skb = NULL;
+        dma_addr_t rdesc_dma;
         unsigned int i, j;
 
         DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +371,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
                         rdata->rdesc = rdesc;
                         rdata->rdesc_dma = rdesc_dma;
 
-                        /* Allocate skb & assign to each rdesc */
-                        skb = dev_alloc_skb(pdata->rx_buf_size);
-                        if (skb == NULL)
-                                break;
-                        skb_dma = dma_map_single(pdata->dev, skb->data,
-                                                 pdata->rx_buf_size,
-                                                 DMA_FROM_DEVICE);
-                        if (dma_mapping_error(pdata->dev, skb_dma)) {
-                                netdev_alert(pdata->netdev,
-                                             "failed to do the dma map\n");
-                                dev_kfree_skb_any(skb);
+                        if (xgbe_map_rx_buffer(pdata, ring, rdata))
                                 break;
-                        }
-                        rdata->skb = skb;
-                        rdata->skb_dma = skb_dma;
-                        rdata->skb_dma_len = pdata->rx_buf_size;
 
                         rdesc++;
                         rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,8 +389,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
         DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
 }
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
-                           struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+                             struct xgbe_ring_data *rdata)
 {
         if (rdata->skb_dma) {
                 if (rdata->mapped_as_page) {
@@ -354,6 +409,21 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
                 rdata->skb = NULL;
         }
 
+        if (rdata->rx_pa.pages)
+                put_page(rdata->rx_pa.pages);
+
+        if (rdata->rx_unmap.pages) {
+                dma_unmap_page(pdata->dev, rdata->rx_unmap.pages_dma,
+                               rdata->rx_unmap.pages_len, DMA_FROM_DEVICE);
+                put_page(rdata->rx_unmap.pages);
+        }
+
+        memset(&rdata->rx_pa, 0, sizeof(rdata->rx_pa));
+        memset(&rdata->rx_unmap, 0, sizeof(rdata->rx_unmap));
+
+        rdata->rx_dma = 0;
+        rdata->rx_dma_len = 0;
+
         rdata->tso_header = 0;
         rdata->len = 0;
         rdata->interrupt = 0;
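The per-descriptor put_page() above is the other half of the get_page() taken in xgbe_map_rx_buffer(); a descriptor flagged via rx_unmap additionally performs the dma_unmap_page() and drops the allocation's base reference. The point of handing out refcounted page slices at all is that the Rx path can attach them to skbs without copying; a hedged sketch of such a consumer follows (the function is hypothetical and not part of this patch, though skb_add_rx_frag() is the stock kernel API for attaching a page fragment):

/* Illustrative consumer (hypothetical function): attach a received
 * fragment to an skb. skb_add_rx_frag() takes over the page reference
 * this rdata holds, so rx_pa.pages is cleared to keep the put_page()
 * in xgbe_unmap_rdata() from dropping that reference a second time.
 */
static void xgbe_rx_attach_frag(struct sk_buff *skb,
                                struct xgbe_ring_data *rdata,
                                unsigned int len)
{
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        rdata->rx_pa.pages, rdata->rx_pa.pages_offset,
                        len, rdata->rx_dma_len);

        /* The skb now owns the reference */
        rdata->rx_pa.pages = NULL;
}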
@@ -494,7 +564,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 err_out:
         while (start_index < cur_index) {
                 rdata = XGBE_GET_DESC_DATA(ring, start_index++);
-                xgbe_unmap_skb(pdata, rdata);
+                xgbe_unmap_rdata(pdata, rdata);
         }
 
         DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +572,25 @@ err_out:
         return 0;
 }
 
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
 {
         struct xgbe_prv_data *pdata = channel->pdata;
         struct xgbe_hw_if *hw_if = &pdata->hw_if;
         struct xgbe_ring *ring = channel->rx_ring;
         struct xgbe_ring_data *rdata;
-        struct sk_buff *skb = NULL;
-        dma_addr_t skb_dma;
         int i;
 
-        DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+        DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
               ring->rx.realloc_index);
 
         for (i = 0; i < ring->dirty; i++) {
                 rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
                 /* Reset rdata values */
-                xgbe_unmap_skb(pdata, rdata);
+                xgbe_unmap_rdata(pdata, rdata);
 
-                /* Allocate skb & assign to each rdesc */
-                skb = dev_alloc_skb(pdata->rx_buf_size);
-                if (skb == NULL)
+                if (xgbe_map_rx_buffer(pdata, ring, rdata))
                         break;
-                skb_dma = dma_map_single(pdata->dev, skb->data,
-                                         pdata->rx_buf_size, DMA_FROM_DEVICE);
-                if (dma_mapping_error(pdata->dev, skb_dma)) {
-                        netdev_alert(pdata->netdev,
-                                     "failed to do the dma map\n");
-                        dev_kfree_skb_any(skb);
-                        break;
-                }
-                rdata->skb = skb;
-                rdata->skb_dma = skb_dma;
-                rdata->skb_dma_len = pdata->rx_buf_size;
 
                 hw_if->rx_desc_reset(rdata);
 
@@ -543,7 +598,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
         }
         ring->dirty = 0;
 
-        DBGPR("<--xgbe_realloc_skb\n");
+        DBGPR("<--xgbe_realloc_rx_buffer\n");
 }
 
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +608,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
         desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
         desc_if->free_ring_resources = xgbe_free_ring_resources;
         desc_if->map_tx_skb = xgbe_map_tx_skb;
-        desc_if->realloc_skb = xgbe_realloc_skb;
-        desc_if->unmap_skb = xgbe_unmap_skb;
+        desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+        desc_if->unmap_rdata = xgbe_unmap_rdata;
         desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
         desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
 }
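These two assignments are the only externally visible surface of the rename; the rest of the driver reaches the routines through the desc_if table rather than calling the static functions directly. A minimal sketch of a call site after this patch (the function name is hypothetical; desc_if is the interface struct embedded in xgbe_prv_data):

/* Hypothetical call site: refill dirty Rx descriptors through the
 * descriptor interface rather than calling the static function.
 */
static void xgbe_rx_refresh_example(struct xgbe_channel *channel)
{
        struct xgbe_desc_if *desc_if = &channel->pdata->desc_if;

        desc_if->realloc_rx_buffer(channel);
}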