@@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			      u16 index, gfp_t gfp_mask)
 {
-	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	struct bnx2x_alloc_pool *pool = &fp->page_pool;
 	dma_addr_t mapping;
 
-	if (unlikely(page == NULL)) {
-		BNX2X_ERR("Can't alloc sge\n");
-		return -ENOMEM;
-	}
+	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
 
-	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGES, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		__free_pages(page, PAGES_PER_SGE_SHIFT);
-		BNX2X_ERR("Can't map sge\n");
-		return -ENOMEM;
+		/* put page reference used by the memory pool, since we
+		 * won't be using this page as the mempool anymore.
+		 */
+		if (pool->page)
+			put_page(pool->page);
+
+		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+		if (unlikely(!pool->page)) {
+			BNX2X_ERR("Can't alloc sge\n");
+			return -ENOMEM;
+		}
+
+		pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+					 PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev,
+					       pool->dma))) {
+			__free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+			pool->page = NULL;
+			BNX2X_ERR("Can't map sge\n");
+			return -ENOMEM;
+		}
+		pool->offset = 0;
 	}
 
-	sw_buf->page = page;
+	get_page(pool->page);
+	sw_buf->page = pool->page;
+	sw_buf->offset = pool->offset;
+
+	mapping = pool->dma + sw_buf->offset;
 	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
+	pool->offset += SGE_PAGE_SIZE;
+
 	return 0;
 }
 
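For reference, a minimal sketch of the pool state the new allocator relies on; the field names (page, dma, offset) are inferred from the hunk above, and the actual definition this patch adds to the driver headers may differ:

struct bnx2x_alloc_pool {
	struct page	*page;		/* current backing page; each carved-out
					 * fragment takes its own reference via
					 * get_page(), the pool keeps one itself
					 */
	dma_addr_t	dma;		/* DMA address of the whole mapped page */
	unsigned int	offset;		/* start of the next free SGE_PAGE_SIZE chunk */
};

With this layout, bnx2x_alloc_rx_sge() only falls back to alloc_pages()/dma_map_page() when fewer than SGE_PAGE_SIZE bytes remain in the current page; otherwise it hands out the next fragment and advances pool->offset.
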
@@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			return err;
 		}
 
-		/* Unmap the page as we're going to pass it to the stack */
-		dma_unmap_page(&bp->pdev->dev,
-			       dma_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGES, DMA_FROM_DEVICE);
+		dma_unmap_single(&bp->pdev->dev,
+				 dma_unmap_addr(&old_rx_pg, mapping),
+				 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 		/* Add one frag and update the appropriate fields in the skb */
 		if (fp->mode == TPA_MODE_LRO)
-			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+			skb_fill_page_desc(skb, j, old_rx_pg.page,
+					   old_rx_pg.offset, frag_len);
 		else { /* GRO */
 			int rem;
 			int offset = 0;
 			for (rem = frag_len; rem > 0; rem -= gro_size) {
 				int len = rem > gro_size ? gro_size : rem;
 				skb_fill_page_desc(skb, frag_id++,
-						   old_rx_pg.page, offset, len);
+						   old_rx_pg.page,
+						   old_rx_pg.offset + offset,
+						   len);
 				if (offset)
 					get_page(old_rx_pg.page);
 				offset += len;
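Because every fragment handed to the stack now carries its own page reference and a non-zero offset into the pool page, the matching release path has to drop that reference instead of assuming it owns the whole page. A hypothetical sketch (not taken from this patch, helper name invented for illustration) of freeing an unconsumed SGE under this scheme, using the sw_rx_page fields shown above:

static void bnx2x_free_rx_sge_frag(struct bnx2x *bp, struct sw_rx_page *sw_buf)
{
	if (!sw_buf->page)
		return;

	/* Unmap only this fragment's slice of the pool page ... */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
			 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... and drop the reference taken in bnx2x_alloc_rx_sge() */
	put_page(sw_buf->page);
	sw_buf->page = NULL;
}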