@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
         vp->mii.reg_num_mask = 0x1f;
 
         /* Makes sure rings are at least 16 byte aligned. */
-        vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+        vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
                                            + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-                                           &vp->rx_ring_dma);
+                                           &vp->rx_ring_dma, GFP_KERNEL);
         retval = -ENOMEM;
         if (!vp->rx_ring)
                 goto free_device;
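
A minimal sketch of the allocation pattern this hunk adopts (hypothetical names, not 3c59x code): dma_alloc_coherent() takes the bus-agnostic struct device plus an explicit gfp_t, so a probe path that may sleep can pass GFP_KERNEL, where pci_alloc_consistent() always implied GFP_ATOMIC.

    #include <linux/dma-mapping.h>

    /* Hypothetical descriptor-ring bookkeeping; my_ring is not a 3c59x type. */
    struct my_ring {
            void *vaddr;        /* CPU virtual address of the ring */
            dma_addr_t dma;     /* matching bus address for the device */
            size_t size;
    };

    static int my_ring_alloc(struct device *dev, struct my_ring *ring, size_t size)
    {
            ring->size = size;
            /* GFP_KERNEL is fine here because probe may sleep. */
            ring->vaddr = dma_alloc_coherent(dev, size, &ring->dma, GFP_KERNEL);
            return ring->vaddr ? 0 : -ENOMEM;
    }

    static void my_ring_free(struct device *dev, struct my_ring *ring)
    {
            /* The free must mirror the allocation: same device, same size. */
            dma_free_coherent(dev, ring->size, ring->vaddr, ring->dma);
    }

The same pairing shows up again at the free_ring label below and in vortex_remove_one at the end of the patch.
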
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
         return 0;
 
 free_ring:
-        pci_free_consistent(pdev,
-                            sizeof(struct boom_rx_desc) * RX_RING_SIZE
-                            + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-                            vp->rx_ring,
-                            vp->rx_ring_dma);
+        dma_free_coherent(&pdev->dev,
+                sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+                sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+                vp->rx_ring, vp->rx_ring_dma);
 free_device:
         free_netdev(dev);
         pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
                                 break;  /* Bad news! */
 
                         skb_reserve(skb, NET_IP_ALIGN);  /* Align IP on 16 byte boundaries */
-                        dma = pci_map_single(VORTEX_PCI(vp), skb->data,
-                                             PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-                        if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+                        dma = dma_map_single(vp->gendev, skb->data,
+                                             PKT_BUF_SZ, DMA_FROM_DEVICE);
+                        if (dma_mapping_error(vp->gendev, dma))
                                 break;
                         vp->rx_ring[i].addr = cpu_to_le32(dma);
                 }
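
A condensed sketch of the receive-buffer handoff used above (hypothetical helper, assuming the struct device saved in vp->gendev at probe time): the skb's data area is mapped DMA_FROM_DEVICE, and the result must be checked with dma_mapping_error() before it is written into a descriptor, since a failed mapping is not a usable address.

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Hypothetical helper: hand one RX buffer to the device. */
    static int my_map_rx_skb(struct device *dev, struct sk_buff *skb,
                             size_t buf_sz, dma_addr_t *out)
    {
            dma_addr_t dma = dma_map_single(dev, skb->data, buf_sz,
                                            DMA_FROM_DEVICE);

            /* Mapping failures must be caught here, never by testing
             * the returned handle against zero. */
            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;
            *out = dma;
            return 0;
    }
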
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
         if (vp->bus_master) {
                 /* Set the bus-master controller to transfer the packet. */
                 int len = (skb->len + 3) & ~3;
-                vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
-                                                PCI_DMA_TODEVICE);
-                if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+                vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+                                                DMA_TO_DEVICE);
+                if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
                         dev_kfree_skb_any(skb);
                         dev->stats.tx_dropped++;
                         return NETDEV_TX_OK;
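
This hunk keeps the usual netdev convention for transmit-side mapping failures, sketched below with hypothetical names: the packet is freed and counted as dropped, and the handler still returns NETDEV_TX_OK so the core does not requeue the skb.

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>

    /* Hypothetical xmit fragment, not the driver's actual ndo_start_xmit. */
    static netdev_tx_t my_xmit_map(struct sk_buff *skb, struct net_device *ndev,
                                   struct device *dev)
    {
            dma_addr_t dma = dma_map_single(dev, skb->data, skb->len,
                                            DMA_TO_DEVICE);

            if (dma_mapping_error(dev, dma)) {
                    dev_kfree_skb_any(skb);      /* safe in any context */
                    ndev->stats.tx_dropped++;
                    return NETDEV_TX_OK;         /* consumed: do not requeue */
            }
            /* ... write dma into a TX descriptor and start the NIC ... */
            return NETDEV_TX_OK;
    }
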
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                         vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
         if (!skb_shinfo(skb)->nr_frags) {
-                dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
-                                          PCI_DMA_TODEVICE);
-                if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+                                          DMA_TO_DEVICE);
+                if (dma_mapping_error(vp->gendev, dma_addr))
                         goto out_dma_err;
 
                 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
         } else {
                 int i;
 
-                dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
-                                          skb_headlen(skb), PCI_DMA_TODEVICE);
-                if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+                dma_addr = dma_map_single(vp->gendev, skb->data,
+                                          skb_headlen(skb), DMA_TO_DEVICE);
+                if (dma_mapping_error(vp->gendev, dma_addr))
                         goto out_dma_err;
 
                 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-                        dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+                        dma_addr = skb_frag_dma_map(vp->gendev, frag,
                                                     0,
                                                     frag->size,
                                                     DMA_TO_DEVICE);
-                        if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+                        if (dma_mapping_error(vp->gendev, dma_addr)) {
                                 for(i = i-1; i >= 0; i--)
-                                        dma_unmap_page(&VORTEX_PCI(vp)->dev,
+                                        dma_unmap_page(vp->gendev,
                                                        le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
                                                        le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
                                                        DMA_TO_DEVICE);
 
-                                pci_unmap_single(VORTEX_PCI(vp),
+                                dma_unmap_single(vp->gendev,
                                                  le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
                                                  le32_to_cpu(vp->tx_ring[entry].frag[0].length),
-                                                 PCI_DMA_TODEVICE);
+                                                 DMA_TO_DEVICE);
 
                                 goto out_dma_err;
                         }
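
When a later fragment fails to map, everything mapped so far has to be unwound with the matching unmap call: dma_unmap_page() for the page fragments that skb_frag_dma_map() produced, and dma_unmap_single() for the linear head. A sketch under invented types (my_frag and my_desc are not 3c59x structures):

    #include <linux/dma-mapping.h>

    struct my_frag { __le32 addr; __le32 length; };   /* hypothetical */
    struct my_desc { struct my_frag frag[17]; };      /* hypothetical */

    /* Unwind a partially mapped skb: frag[0] is the linear head,
     * frag[1..mapped] are page fragments that were already mapped. */
    static void my_unwind_tx(struct device *dev, struct my_desc *d, int mapped)
    {
            int i;

            for (i = mapped - 1; i >= 0; i--)
                    dma_unmap_page(dev, le32_to_cpu(d->frag[i + 1].addr),
                                   le32_to_cpu(d->frag[i + 1].length),
                                   DMA_TO_DEVICE);
            /* The head came from dma_map_single(), so it must be released
             * with dma_unmap_single(), not dma_unmap_page(). */
            dma_unmap_single(dev, le32_to_cpu(d->frag[0].addr),
                             le32_to_cpu(d->frag[0].length), DMA_TO_DEVICE);
    }
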
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 }
         }
 #else
-        dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
-        if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+        dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+        if (dma_mapping_error(vp->gendev, dma_addr))
                 goto out_dma_err;
         vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
         vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 out:
         return NETDEV_TX_OK;
 out_dma_err:
-        dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+        dev_err(vp->gendev, "Error mapping dma buffer\n");
         goto out;
 }
 
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
                 if (status & DMADone) {
                         if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
                                 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
-                                pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+                                dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
                                 pkts_compl++;
                                 bytes_compl += vp->tx_skb->len;
                                 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
                                 struct sk_buff *skb = vp->tx_skbuff[entry];
 #if DO_ZEROCOPY
                                 int i;
-                                pci_unmap_single(VORTEX_PCI(vp),
+                                dma_unmap_single(vp->gendev,
                                                 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
                                                 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
 
                                 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
-                                                pci_unmap_page(VORTEX_PCI(vp),
+                                                dma_unmap_page(vp->gendev,
                                                                 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
                                                                 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
-                                                                PCI_DMA_TODEVICE);
+                                                                DMA_TO_DEVICE);
 #else
-                                pci_unmap_single(VORTEX_PCI(vp),
-                                        le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+                                dma_unmap_single(vp->gendev,
+                                        le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
 #endif
                                 pkts_compl++;
                                 bytes_compl += skb->len;
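
On the completion side, every submit-time mapping is released exactly once, with the same size (here the hardware length field masked to its low 12 bits) and the same direction. A compressed sketch reusing the hypothetical my_desc type from above:

    /* Hypothetical TX-completion unmap; runs in IRQ context, hence
     * the dev_kfree_skb_irq() variant for freeing the skb. */
    static void my_tx_complete(struct device *dev, struct my_desc *d,
                               struct sk_buff *skb)
    {
            int i;

            dma_unmap_single(dev, le32_to_cpu(d->frag[0].addr),
                             le32_to_cpu(d->frag[0].length) & 0xFFF,
                             DMA_TO_DEVICE);
            for (i = 1; i <= skb_shinfo(skb)->nr_frags; i++)
                    dma_unmap_page(dev, le32_to_cpu(d->frag[i].addr),
                                   le32_to_cpu(d->frag[i].length) & 0xFFF,
                                   DMA_TO_DEVICE);
            dev_kfree_skb_irq(skb);
    }
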
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
                         /* 'skb_put()' points to the start of sk_buff data area. */
                         if (vp->bus_master &&
                                 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
-                                dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
-                                                                   pkt_len, PCI_DMA_FROMDEVICE);
+                                dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+                                                                   pkt_len, DMA_FROM_DEVICE);
                                 iowrite32(dma, ioaddr + Wn7_MasterAddr);
                                 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
                                 iowrite16(StartDMAUp, ioaddr + EL3_CMD);
                                 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
                                         ;
-                                pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+                                dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
                         } else {
                                 ioread32_rep(ioaddr + RX_FIFO,
                                              skb_put(skb, pkt_len),
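
The old Vortex bus-master path above does a single-shot DMA: map the destination, program address and length, start the transfer, busy-wait on the status register, then unmap to hand the buffer back to the CPU. A sketch of that sequence with made-up register offsets (the MY_* names are not 3c59x registers):

    #include <linux/dma-mapping.h>
    #include <linux/io.h>

    #define MY_ADDR 0x24     /* hypothetical DMA address register */
    #define MY_LEN  0x26     /* hypothetical DMA length register */
    #define MY_STAT 0x28     /* hypothetical status/command register */
    #define MY_GO   0x0001   /* hypothetical "start" command */
    #define MY_BUSY 0x8000   /* hypothetical "in progress" bit */

    static int my_dma_rx(struct device *dev, void __iomem *io,
                         void *buf, size_t len)
    {
            dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;
            iowrite32(dma, io + MY_ADDR);
            iowrite16(len, io + MY_LEN);
            iowrite16(MY_GO, io + MY_STAT);
            while (ioread16(io + MY_STAT) & MY_BUSY)   /* busy-wait, as above */
                    ;
            /* Unmapping transfers ownership of the data back to the CPU. */
            dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
            return 0;
    }
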
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
                 if (pkt_len < rx_copybreak &&
                     (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                         skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                        pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                        dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                         /* 'skb_put()' points to the start of sk_buff data area. */
                         skb_put_data(skb, vp->rx_skbuff[entry]->data,
                                      pkt_len);
-                        pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                        dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                         vp->rx_copy++;
                 } else {
                         /* Pre-allocate the replacement skb. If it or its
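
In the copybreak path the ring buffer stays mapped for its whole lifetime and the CPU only borrows it: dma_sync_single_for_cpu() makes the freshly DMA'd bytes visible for the copy, and dma_sync_single_for_device() returns ownership so the descriptor can be reused without a remap. A minimal sketch with hypothetical names:

    /* Hypothetical copybreak helper: dma stays mapped DMA_FROM_DEVICE. */
    static void my_copybreak(struct device *dev, dma_addr_t dma,
                             const void *ring_buf, struct sk_buff *copy,
                             unsigned int pkt_len, size_t buf_sz)
    {
            dma_sync_single_for_cpu(dev, dma, buf_sz, DMA_FROM_DEVICE);
            skb_put_data(copy, ring_buf, pkt_len);   /* CPU owns the buffer */
            dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
    }
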
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
                                 dev->stats.rx_dropped++;
                                 goto clear_complete;
                         }
-                        newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
-                                                PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-                        if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+                        newdma = dma_map_single(vp->gendev, newskb->data,
+                                                PKT_BUF_SZ, DMA_FROM_DEVICE);
+                        if (dma_mapping_error(vp->gendev, newdma)) {
                                 dev->stats.rx_dropped++;
                                 consume_skb(newskb);
                                 goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
                         vp->rx_skbuff[entry] = newskb;
                         vp->rx_ring[entry].addr = cpu_to_le32(newdma);
                         skb_put(skb, pkt_len);
-                        pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                        dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                         vp->rx_nocopy++;
                 }
                 skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
         if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
                 for (i = 0; i < RX_RING_SIZE; i++)
                         if (vp->rx_skbuff[i]) {
-                                pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
-                                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                                dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                                 dev_kfree_skb(vp->rx_skbuff[i]);
                                 vp->rx_skbuff[i] = NULL;
                         }
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
                                 int k;
 
                                 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
-                                                pci_unmap_single(VORTEX_PCI(vp),
+                                                dma_unmap_single(vp->gendev,
                                                                 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
                                                                 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
-                                                                PCI_DMA_TODEVICE);
+                                                                DMA_TO_DEVICE);
 #else
-                                pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+                                dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
 #endif
                                 dev_kfree_skb(skb);
                                 vp->tx_skbuff[i] = NULL;
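
Teardown in close walks both rings and unmaps before freeing, in process context. The RX side reduces to a loop like this sketch (my_rx_slot is an invented type):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct my_rx_slot {                /* hypothetical ring bookkeeping */
            struct sk_buff *skb;
            __le32 addr;
    };

    static void my_rx_teardown(struct device *dev, struct my_rx_slot *ring,
                               int entries, size_t buf_sz)
    {
            int i;

            for (i = 0; i < entries; i++) {
                    if (!ring[i].skb)
                            continue;
                    /* Unmap first, so the device can no longer write the
                     * buffer, then free the skb and clear the slot. */
                    dma_unmap_single(dev, le32_to_cpu(ring[i].addr),
                                     buf_sz, DMA_FROM_DEVICE);
                    dev_kfree_skb(ring[i].skb);
                    ring[i].skb = NULL;
            }
    }
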
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
 
         pci_iounmap(pdev, vp->ioaddr);
 
-        pci_free_consistent(pdev,
-                            sizeof(struct boom_rx_desc) * RX_RING_SIZE
-                            + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-                            vp->rx_ring,
-                            vp->rx_ring_dma);
+        dma_free_coherent(&pdev->dev,
+                        sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+                        sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+                        vp->rx_ring, vp->rx_ring_dma);
 
         pci_release_regions(pdev);
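
vortex_remove_one() only runs for PCI devices, so &pdev->dev is always valid here; the free mirrors the probe-time allocation with the same device, size, CPU pointer, and DMA handle. Reusing the hypothetical my_ring type sketched earlier:

    /* Hypothetical removal hook: release the coherent ring after the
     * hardware can no longer DMA (here, after pci_iounmap()). */
    static void my_remove(struct pci_dev *pdev, struct my_ring *ring)
    {
            dma_free_coherent(&pdev->dev, ring->size, ring->vaddr, ring->dma);
    }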