@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					dev_kfree_skb_any(rbi->skb);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffer skipped by the device */
 			}
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					put_page(rbi->page);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffers skipped by the device */
 			}
 			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
 		}
 
-		BUG_ON(rbi->dma_addr == 0);
 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
 		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 					   | val | rbi->len);
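The dropped BUG_ON() above is the core of the change: comparing a dma_addr_t against 0 is not a valid failure test, since 0 can be a legal bus address on some platforms and the DMA API's error cookie is implementation-defined. dma_mapping_error() is the only portable check. Below is a minimal sketch of the refill pattern these hunks adopt; my_rx_buf and my_map_rx_skb are hypothetical stand-ins for vmxnet3_rx_buf_info and the driver's refill loop, not the driver's own names.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for vmxnet3_rx_buf_info. */
struct my_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u32 len;
};

/* Map an rx skb; on failure, free it and report instead of BUG(). */
static int my_map_rx_skb(struct device *dev, struct my_rx_buf *rbi)
{
	rbi->dma_addr = dma_map_single(dev, rbi->skb->data, rbi->len,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rbi->dma_addr)) {
		dev_kfree_skb_any(rbi->skb);	/* undo the allocation */
		rbi->skb = NULL;
		return -EFAULT;			/* caller stops refilling */
	}
	return 0;
}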
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 }
 
 
-static void
+static int
 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
 		struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+			return -EFAULT;
 
 		tbi->len = buf_size;
 
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 							 buf_offset, buf_size,
 							 DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+				return -EFAULT;
 
 			tbi->len = buf_size;
 
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	/* set the last buf_info for the pkt */
 	tbi->skb = skb;
 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+	return 0;
 }
 
 
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	}
 
 	/* fill tx descs related to addr & len */
-	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+		goto unlock_drop_pkt;
 
 	/* setup the EOP desc */
 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
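Changing vmxnet3_map_pkt() to return an error lets the transmit path unwind instead of posting a descriptor that carries an unusable address; on failure, vmxnet3_tq_xmit() takes its existing unlock_drop_pkt exit. The same shape in miniature, with hypothetical my_map_pkt()/my_xmit() helpers; note that a mapping error is not a retryable condition, so the packet is dropped rather than requeued:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: map the linear part of an skb, reporting failure to the caller. */
static int my_map_pkt(struct device *dev, struct sk_buff *skb,
		      dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(dev, skb->data, skb_headlen(skb),
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma_addr))
		return -EFAULT;
	return 0;
}

/* Sketch: the caller drops the packet instead of using a bad address. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct device *dev)
{
	dma_addr_t dma_addr;

	if (my_map_pkt(dev, skb, &dma_addr)) {
		dev_kfree_skb_any(skb);	/* drop; do not return NETDEV_TX_BUSY */
		return NETDEV_TX_OK;
	}
	/* ... fill tx descriptors with dma_addr and ring the doorbell ... */
	return NETDEV_TX_OK;
}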
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	struct vmxnet3_rx_buf_info *rbi;
 	struct sk_buff *skb, *new_skb = NULL;
 	struct page *new_page = NULL;
+	dma_addr_t new_dma_addr;
 	int num_to_alloc;
 	struct Vmxnet3_RxDesc *rxd;
 	u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				skip_page_frags = true;
 				goto rcd_done;
 			}
+			new_dma_addr = dma_map_single(&adapter->pdev->dev,
+						      new_skb->data, rbi->len,
+						      PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      new_dma_addr)) {
+				dev_kfree_skb(new_skb);
+				/* DMA mapping failed, do not hand this skb to
+				 * the stack. Reuse the rx buffer and drop the pkt.
+				 */
+				rq->stats.rx_buf_alloc_failure++;
+				ctx->skb = NULL;
+				rq->stats.drop_total++;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
 
 			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
 					 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->skb = new_skb;
-			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
-						       rbi->skb->data, rbi->len,
-						       PCI_DMA_FROMDEVICE);
+			rbi->dma_addr = new_dma_addr;
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 			if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 					skip_page_frags = true;
 					goto rcd_done;
 				}
+				new_dma_addr = dma_map_page(&adapter->pdev->dev,
+							    rbi->page,
+							    0, PAGE_SIZE,
+							    PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      new_dma_addr)) {
+					put_page(new_page);
+					rq->stats.rx_buf_alloc_failure++;
+					dev_kfree_skb(ctx->skb);
+					ctx->skb = NULL;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
 
 				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 				/* Immediate refill */
 				rbi->page = new_page;
-				rbi->dma_addr = dma_map_page(&adapter->pdev->dev
-							, rbi->page,
-							0, PAGE_SIZE,
-							PCI_DMA_FROMDEVICE);
+				rbi->dma_addr = new_dma_addr;
 				rxd->addr = cpu_to_le64(rbi->dma_addr);
 				rxd->len = rbi->len;
 			}
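Both rx-completion hunks order the operations so that the replacement buffer is mapped first, and the old buffer is unmapped and handed to the stack only once the new mapping is known to be good; a failure therefore leaves the ring slot with its original, still-mapped buffer. Reusing the hypothetical my_rx_buf above, the skb case looks roughly like this:

/* Sketch: swap in a new rx buffer only after its mapping succeeds. */
static int my_refill_slot(struct device *dev, struct my_rx_buf *rbi,
			  struct sk_buff *new_skb)
{
	dma_addr_t new_dma_addr;

	new_dma_addr = dma_map_single(dev, new_skb->data, rbi->len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, new_dma_addr)) {
		dev_kfree_skb_any(new_skb);
		return -EFAULT;		/* old buffer stays in the slot */
	}

	/* Only now is it safe to release the old mapping; the caller
	 * has already taken the old skb for netif_receive_skb().
	 */
	dma_unmap_single(dev, rbi->dma_addr, rbi->len, DMA_FROM_DEVICE);
	rbi->skb = new_skb;
	rbi->dma_addr = new_dma_addr;
	return 0;
}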
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
 					       PCI_DMA_TODEVICE);
 	}
 
-	if (new_table_pa) {
+	if (!dma_mapping_error(&adapter->pdev->dev,
+			       new_table_pa)) {
 		new_mode |= VMXNET3_RXM_MCAST;
 		rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 	} else {
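The vmxnet3_set_mc() hunk fixes the same misconception in open-coded form: the old test "if (new_table_pa)" presumed that a failed dma_map_single() returns 0, which the DMA API does not guarantee. Side by side:

/* Wrong: 0 is neither a guaranteed error value nor an invalid address. */
if (new_table_pa) {
	/* ... */
}

/* Right: only dma_mapping_error() can classify the returned cookie. */
if (!dma_mapping_error(&adapter->pdev->dev, new_table_pa)) {
	/* ... */
}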
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
 					     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+		dev_err(&pdev->dev, "Failed to map dma\n");
+		err = -EFAULT;
+		goto err_dma_map;
+	}
 	adapter->shared = dma_alloc_coherent(
 				&adapter->pdev->dev,
 				sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ err_alloc_queue_desc:
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
 	free_netdev(netdev);
 	return err;
 }
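The new err_dma_map label follows the usual kernel unwind convention in probe functions: labels are laid out in reverse order of setup, so a failure jumps past the cleanup for steps that never ran, and the fresh mapping is released only when a later step fails. A bare sketch of that shape, with map_adapter()/alloc_shared() and their undo helper as hypothetical names:

static int map_adapter(struct device *dev);
static int alloc_shared(struct device *dev);
static void unmap_adapter(struct device *dev);

/* Sketch of the goto-unwind layout the probe path follows. */
static int my_probe(struct device *dev)
{
	int err;

	err = map_adapter(dev);		/* e.g. the dma_map_single() step */
	if (err)
		goto err_dma_map;	/* nothing mapped yet, skip unmap */

	err = alloc_shared(dev);	/* e.g. the dma_alloc_coherent() step */
	if (err)
		goto err_alloc_shared;

	return 0;

err_alloc_shared:
	unmap_adapter(dev);		/* undo only the step that succeeded */
err_dma_map:
	return err;
}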