@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -279,6 +283,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 
+/* OpenFirmware identifiers for platform-bus devices.
+ * The .data field is currently only used to store the chip revision
+ * (for quirks etc.).
+ */
+static const struct of_device_id rhine_of_tbl[] = {
+	{ .compatible = "via,vt8500-rhine", .data = (void *)0x84 },
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 
 /* Offsets to the device registers. */
 enum register_offsets {
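[Note: the hard-coded revision 0x84 is compared against the driver's
existing rhine_revs constants. Abridged from via-rhine.c for context
(values quoted from the driver, worth double-checking against the tree):

	enum rhine_revs {
		VTunknown0	= 0x20,
		VT6102		= 0x40,
		VT6105		= 0x80,
		VT6105_B0	= 0x83,
		VT6105M		= 0x90,
	};

0x84 therefore takes the integrated-PHY "Rhine III" path in the quirk
setup below: rqWOL | rqForceReset | rq6patterns, with phy_id = 1.]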
@@ -446,7 +459,8 @@ struct rhine_private {
 	unsigned char *tx_bufs;
 	dma_addr_t tx_bufs_dma;
 
-	struct pci_dev *pdev;
+	int revision;
+	int irq;
 	long pioaddr;
 	struct net_device *dev;
 	struct napi_struct napi;
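[Note: with rp->pdev gone, the rest of the patch reaches the underlying
bus device through the net_device's parent pointer, which
rhine_init_one_common() sets via SET_NETDEV_DEV(). A minimal sketch of
the pattern (the helper name rhine_hwdev is illustrative, not part of
the patch):

	/* Works for PCI and platform devices alike: dev->dev.parent is
	 * &pdev->dev in either case, which is all the DMA API needs.
	 */
	static inline struct device *rhine_hwdev(struct net_device *dev)
	{
		return dev->dev.parent;
	}
]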
@@ -701,7 +715,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 static void rhine_poll(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-	const int irq = rp->pdev->irq;
+	const int irq = rp->irq;
 
 	disable_irq(irq);
 	rhine_interrupt(irq, dev);
@@ -846,7 +860,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
 	msleep(5);
 
 	/* Reload EEPROM controlled bytes cleared by soft reset */
-	rhine_reload_eeprom(pioaddr, dev);
+	if (dev_is_pci(dev->dev.parent))
+		rhine_reload_eeprom(pioaddr, dev);
 }
 
 static const struct net_device_ops rhine_netdev_ops = {
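[Note: dev_is_pci() keys off the bus type, so only PCI parts take the
EEPROM-reload path; the VT8500-class platform devices have no EEPROM.
For reference, the helper is a one-line macro in <linux/pci.h> (quoted
from memory, worth verifying):

	#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
]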
@@ -867,125 +882,55 @@ static const struct net_device_ops rhine_netdev_ops = {
 #endif
 };
 
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, int revision,
+				 long pioaddr, void __iomem *ioaddr, int irq)
 {
 	struct net_device *dev;
 	struct rhine_private *rp;
-	int i, rc;
-	u32 quirks;
-	long pioaddr;
-	long memaddr;
-	void __iomem *ioaddr;
-	int io_size, phy_id;
+	int i, rc, phy_id;
 	const char *name;
-#ifdef USE_MMIO
-	int bar = 1;
-#else
-	int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-	pr_info_once("%s\n", version);
-#endif
-
-	io_size = 256;
-	phy_id = 0;
-	quirks = 0;
-	name = "Rhine";
-	if (pdev->revision < VTunknown0) {
-		quirks = rqRhineI;
-		io_size = 128;
-	}
-	else if (pdev->revision >= VT6102) {
-		quirks = rqWOL | rqForceReset;
-		if (pdev->revision < VT6105) {
-			name = "Rhine II";
-			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
-		}
-		else {
-			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
-			if (pdev->revision >= VT6105_B0)
-				quirks |= rq6patterns;
-			if (pdev->revision < VT6105M)
-				name = "Rhine III";
-			else
-				name = "Rhine III (Management Adapter)";
-		}
-	}
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		goto err_out;
 
 	/* this should always be supported */
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
 	if (rc) {
-		dev_err(&pdev->dev,
-			"32-bit PCI DMA addresses not supported by the card!?\n");
-		goto err_out_pci_disable;
-	}
-
-	/* sanity check */
-	if ((pci_resource_len(pdev, 0) < io_size) ||
-	    (pci_resource_len(pdev, 1) < io_size)) {
-		rc = -EIO;
-		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-		goto err_out_pci_disable;
+		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+		goto err_out;
 	}
 
-	pioaddr = pci_resource_start(pdev, 0);
-	memaddr = pci_resource_start(pdev, 1);
-
-	pci_set_master(pdev);
-
 	dev = alloc_etherdev(sizeof(struct rhine_private));
 	if (!dev) {
 		rc = -ENOMEM;
-		goto err_out_pci_disable;
+		goto err_out;
 	}
-	SET_NETDEV_DEV(dev, &pdev->dev);
+	SET_NETDEV_DEV(dev, hwdev);
 
 	rp = netdev_priv(dev);
 	rp->dev = dev;
-	rp->quirks = quirks;
+	rp->revision = revision;
 	rp->pioaddr = pioaddr;
-	rp->pdev = pdev;
+	rp->base = ioaddr;
+	rp->irq = irq;
 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc)
-		goto err_out_free_netdev;
-
-	ioaddr = pci_iomap(pdev, bar, io_size);
-	if (!ioaddr) {
-		rc = -EIO;
-		dev_err(&pdev->dev,
-			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
-			pci_name(pdev), io_size, memaddr);
-		goto err_out_free_res;
-	}
-
-#ifdef USE_MMIO
-	enable_mmio(pioaddr, quirks);
-
-	/* Check that selected MMIO registers match the PIO ones */
-	i = 0;
-	while (mmio_verify_registers[i]) {
-		int reg = mmio_verify_registers[i++];
-		unsigned char a = inb(pioaddr+reg);
-		unsigned char b = readb(ioaddr+reg);
-		if (a != b) {
-			rc = -EIO;
-			dev_err(&pdev->dev,
-				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
-				reg, a, b);
-			goto err_out_unmap;
+	phy_id = 0;
+	name = "Rhine";
+	if (revision < VTunknown0) {
+		rp->quirks = rqRhineI;
+	} else if (revision >= VT6102) {
+		rp->quirks = rqWOL | rqForceReset;
+		if (revision < VT6105) {
+			name = "Rhine II";
+			rp->quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
+		} else {
+			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
+			if (revision >= VT6105_B0)
+				rp->quirks |= rq6patterns;
+			if (revision < VT6105M)
+				name = "Rhine III";
+			else
+				name = "Rhine III (Management Adapter)";
 		}
 	}
-#endif /* USE_MMIO */
-
-	rp->base = ioaddr;
 
 	u64_stats_init(&rp->tx_stats.syncp);
 	u64_stats_init(&rp->rx_stats.syncp);
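[Note: dma_set_mask() is the bus-agnostic call behind the old helper;
pci_set_dma_mask() is just a compat wrapper, roughly (quoted from the
PCI headers, worth verifying against the tree):

	static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
	{
		return dma_set_mask(&dev->dev, mask);
	}

so the common function keeps the exact PCI behaviour while also
covering the platform bus.]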
@@ -1030,7 +975,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	if (pdev->revision >= VT6105M)
+	if (rp->revision >= VT6105M)
 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 				 NETIF_F_HW_VLAN_CTAG_RX |
 				 NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +983,12 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
-		goto err_out_unmap;
+		goto err_out_free_netdev;
 
 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-		    name,
-#ifdef USE_MMIO
-		    memaddr,
-#else
-		    (long)ioaddr,
-#endif
-		    dev->dev_addr, pdev->irq);
+		    name, (long)ioaddr, dev->dev_addr, rp->irq);
 
-	pci_set_drvdata(pdev, dev);
+	dev_set_drvdata(hwdev, dev);
 
 	{
 		u16 mii_cmd;
@@ -1078,41 +1017,152 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	return 0;
 
+err_out_free_netdev:
+	free_netdev(dev);
+err_out:
+	return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct device *hwdev = &pdev->dev;
+	int i, rc;
+	long pioaddr, memaddr;
+	void __iomem *ioaddr;
+	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+	u32 quirks = pdev->revision < VTunknown0 ? rqRhineI : 0;
+#ifdef USE_MMIO
+	int bar = 1;
+#else
+	int bar = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	pr_info_once("%s\n", version);
+#endif
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_out;
+
+	/* sanity check */
+	if ((pci_resource_len(pdev, 0) < io_size) ||
+	    (pci_resource_len(pdev, 1) < io_size)) {
+		rc = -EIO;
+		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+		goto err_out_pci_disable;
+	}
+
+	pioaddr = pci_resource_start(pdev, 0);
+	memaddr = pci_resource_start(pdev, 1);
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_pci_disable;
+
+	ioaddr = pci_iomap(pdev, bar, io_size);
+	if (!ioaddr) {
+		rc = -EIO;
+		dev_err(hwdev,
+			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+			dev_name(hwdev), io_size, memaddr);
+		goto err_out_free_res;
+	}
+
+#ifdef USE_MMIO
+	enable_mmio(pioaddr, quirks);
+
+	/* Check that selected MMIO registers match the PIO ones */
+	i = 0;
+	while (mmio_verify_registers[i]) {
+		int reg = mmio_verify_registers[i++];
+		unsigned char a = inb(pioaddr+reg);
+		unsigned char b = readb(ioaddr+reg);
+
+		if (a != b) {
+			rc = -EIO;
+			dev_err(hwdev,
+				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
+				reg, a, b);
+			goto err_out_unmap;
+		}
+	}
+#endif /* USE_MMIO */
+
+	rc = rhine_init_one_common(&pdev->dev, pdev->revision,
+				   pioaddr, ioaddr, pdev->irq);
+	if (!rc)
+		return 0;
+
 err_out_unmap:
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
-err_out_free_netdev:
-	free_netdev(dev);
 err_out_pci_disable:
 	pci_disable_device(pdev);
 err_out:
 	return rc;
 }
 
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	u32 revision;
+	int irq;
+	struct resource *res;
+	void __iomem *ioaddr;
+
+	match = of_match_device(rhine_of_tbl, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq)
+		return -EINVAL;
+
+	/* cast via unsigned long so the pointer-sized .data cookie
+	 * truncates cleanly on 64-bit builds */
+	revision = (u32)(unsigned long)match->data;
+	if (!revision)
+		return -EINVAL;
+
+	return rhine_init_one_common(&pdev->dev, revision,
+				     (long)ioaddr, ioaddr, irq);
+}
+
 static int alloc_ring(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	void *ring;
 	dma_addr_t ring_dma;
 
-	ring = pci_alloc_consistent(rp->pdev,
-				    RX_RING_SIZE * sizeof(struct rx_desc) +
-				    TX_RING_SIZE * sizeof(struct tx_desc),
-				    &ring_dma);
+	ring = dma_alloc_coherent(hwdev,
+				  RX_RING_SIZE * sizeof(struct rx_desc) +
+				  TX_RING_SIZE * sizeof(struct tx_desc),
+				  &ring_dma,
+				  GFP_ATOMIC);
 	if (!ring) {
 		netdev_err(dev, "Could not allocate DMA memory\n");
 		return -ENOMEM;
 	}
 	if (rp->quirks & rqRhineI) {
-		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
-						   PKT_BUF_SZ * TX_RING_SIZE,
-						   &rp->tx_bufs_dma);
+		rp->tx_bufs = dma_alloc_coherent(hwdev,
+						 PKT_BUF_SZ * TX_RING_SIZE,
+						 &rp->tx_bufs_dma,
+						 GFP_ATOMIC);
 		if (rp->tx_bufs == NULL) {
-			pci_free_consistent(rp->pdev,
-					    RX_RING_SIZE * sizeof(struct rx_desc) +
-					    TX_RING_SIZE * sizeof(struct tx_desc),
-					    ring, ring_dma);
+			dma_free_coherent(hwdev,
+					  RX_RING_SIZE * sizeof(struct rx_desc) +
+					  TX_RING_SIZE * sizeof(struct tx_desc),
+					  ring, ring_dma);
 			return -ENOMEM;
 		}
 	}
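[Note on the GFP_ATOMIC argument: pci_alloc_consistent() has always been
a thin wrapper around the generic DMA API, roughly (quoted from
asm-generic/pci-dma-compat.h, worth verifying):

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
					  size, dma_handle, GFP_ATOMIC);
	}

so passing GFP_ATOMIC to dma_alloc_coherent() keeps the allocation
behaviour identical to the PCI-only code it replaces.]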
@@ -1128,16 +1178,17 @@ static int alloc_ring(struct net_device* dev)
 static void free_ring(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 
-	pci_free_consistent(rp->pdev,
-			    RX_RING_SIZE * sizeof(struct rx_desc) +
-			    TX_RING_SIZE * sizeof(struct tx_desc),
-			    rp->rx_ring, rp->rx_ring_dma);
+	dma_free_coherent(hwdev,
+			  RX_RING_SIZE * sizeof(struct rx_desc) +
+			  TX_RING_SIZE * sizeof(struct tx_desc),
+			  rp->rx_ring, rp->rx_ring_dma);
 	rp->tx_ring = NULL;
 
 	if (rp->tx_bufs)
-		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
-				    rp->tx_bufs, rp->tx_bufs_dma);
+		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+				  rp->tx_bufs, rp->tx_bufs_dma);
 
 	rp->tx_bufs = NULL;
 
@@ -1146,6 +1197,7 @@ static void free_ring(struct net_device* dev)
 static void alloc_rbufs(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	dma_addr_t next;
 	int i;
 
@@ -1174,9 +1226,9 @@ static void alloc_rbufs(struct net_device *dev)
 			break;
 
 		rp->rx_skbuff_dma[i] =
-			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
-				       PCI_DMA_FROMDEVICE);
-		if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
 			rp->rx_skbuff_dma[i] = 0;
 			dev_kfree_skb(skb);
 			break;
@@ -1190,6 +1242,7 @@ static void alloc_rbufs(struct net_device *dev)
 static void free_rbufs(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int i;
 
 	/* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1250,9 @@ static void free_rbufs(struct net_device* dev)
 		rp->rx_ring[i].rx_status = 0;
 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
 		if (rp->rx_skbuff[i]) {
-			pci_unmap_single(rp->pdev,
+			dma_unmap_single(hwdev,
 					 rp->rx_skbuff_dma[i],
-					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+					 rp->rx_buf_sz, DMA_FROM_DEVICE);
 			dev_kfree_skb(rp->rx_skbuff[i]);
 		}
 		rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1283,7 @@ static void alloc_tbufs(struct net_device* dev)
 static void free_tbufs(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int i;
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1292,10 @@ static void free_tbufs(struct net_device* dev)
 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
 		if (rp->tx_skbuff[i]) {
 			if (rp->tx_skbuff_dma[i]) {
-				pci_unmap_single(rp->pdev,
+				dma_unmap_single(hwdev,
 						 rp->tx_skbuff_dma[i],
 						 rp->tx_skbuff[i]->len,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 			}
 			dev_kfree_skb(rp->tx_skbuff[i]);
 		}
@@ -1469,7 +1523,7 @@ static void init_registers(struct net_device *dev)
 
 	rhine_set_rx_mode(dev);
 
-	if (rp->pdev->revision >= VT6105M)
+	if (rp->revision >= VT6105M)
 		rhine_init_cam_filter(dev);
 
 	napi_enable(&rp->napi);
@@ -1581,16 +1635,15 @@ static int rhine_open(struct net_device *dev)
 	void __iomem *ioaddr = rp->base;
 	int rc;
 
-	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
-			 dev);
+	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		return rc;
 
-	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
 
 	rc = alloc_ring(dev);
 	if (rc) {
-		free_irq(rp->pdev->irq, dev);
+		free_irq(rp->irq, dev);
 		return rc;
 	}
 	alloc_rbufs(dev);
@@ -1659,6 +1712,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 				  struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
 
@@ -1695,9 +1749,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 						       rp->tx_bufs));
 	} else {
 		rp->tx_skbuff_dma[entry] =
-			pci_map_single(rp->pdev, skb->data, skb->len,
-				       PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+			dma_map_single(hwdev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
 			dev_kfree_skb_any(skb);
 			rp->tx_skbuff_dma[entry] = 0;
 			dev->stats.tx_dropped++;
@@ -1788,6 +1842,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 static void rhine_tx(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 
 	/* find and cleanup dirty tx descriptors */
@@ -1831,10 +1886,10 @@ static void rhine_tx(struct net_device *dev)
 		}
 		/* Free the original skb. */
 		if (rp->tx_skbuff_dma[entry]) {
-			pci_unmap_single(rp->pdev,
+			dma_unmap_single(hwdev,
 					 rp->tx_skbuff_dma[entry],
 					 rp->tx_skbuff[entry]->len,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		}
 		dev_consume_skb_any(rp->tx_skbuff[entry]);
 		rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1918,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
 static int rhine_rx(struct net_device *dev, int limit)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	int count;
 	int entry = rp->cur_rx % RX_RING_SIZE;
 
@@ -1924,19 +1980,19 @@ static int rhine_rx(struct net_device *dev, int limit)
 			if (pkt_len < rx_copybreak)
 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 			if (skb) {
-				pci_dma_sync_single_for_cpu(rp->pdev,
-							    rp->rx_skbuff_dma[entry],
-							    rp->rx_buf_sz,
-							    PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(hwdev,
+							rp->rx_skbuff_dma[entry],
+							rp->rx_buf_sz,
+							DMA_FROM_DEVICE);
 
 				skb_copy_to_linear_data(skb,
 							rp->rx_skbuff[entry]->data,
 							pkt_len);
 				skb_put(skb, pkt_len);
-				pci_dma_sync_single_for_device(rp->pdev,
-							       rp->rx_skbuff_dma[entry],
-							       rp->rx_buf_sz,
-							       PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(hwdev,
+							   rp->rx_skbuff_dma[entry],
+							   rp->rx_buf_sz,
+							   DMA_FROM_DEVICE);
 			} else {
 				skb = rp->rx_skbuff[entry];
 				if (skb == NULL) {
@@ -1945,10 +2001,10 @@ static int rhine_rx(struct net_device *dev, int limit)
 				}
 				rp->rx_skbuff[entry] = NULL;
 				skb_put(skb, pkt_len);
-				pci_unmap_single(rp->pdev,
+				dma_unmap_single(hwdev,
 						 rp->rx_skbuff_dma[entry],
 						 rp->rx_buf_sz,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			}
 
 			if (unlikely(desc_length & DescTag))
@@ -1979,10 +2035,11 @@ static int rhine_rx(struct net_device *dev, int limit)
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			rp->rx_skbuff_dma[entry] =
-				pci_map_single(rp->pdev, skb->data,
+				dma_map_single(hwdev, skb->data,
 					       rp->rx_buf_sz,
-					       PCI_DMA_FROMDEVICE);
-			if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+					       DMA_FROM_DEVICE);
+			if (dma_mapping_error(hwdev,
+					      rp->rx_skbuff_dma[entry])) {
 				dev_kfree_skb(skb);
 				rp->rx_skbuff_dma[entry] = 0;
 				break;
@@ -2103,7 +2160,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-	} else if (rp->pdev->revision >= VT6105M) {
+	} else if (rp->revision >= VT6105M) {
 		int i = 0;
 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
 		netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2182,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
 	}
 	/* enable/disable VLAN receive filtering */
-	if (rp->pdev->revision >= VT6105M) {
+	if (rp->revision >= VT6105M) {
 		if (dev->flags & IFF_PROMISC)
 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
 		else
@@ -2136,11 +2193,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2334,7 @@ static int rhine_close(struct net_device *dev)
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(CmdStop, ioaddr + ChipCmd);
 
-	free_irq(rp->pdev->irq, dev);
+	free_irq(rp->irq, dev);
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
@@ -2286,7 +2343,7 @@ static int rhine_close(struct net_device *dev)
 }
 
 
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2357,22 @@ static void rhine_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	unregister_netdev(dev);
+
+	/* The MMIO region was mapped with devm_ioremap_resource(), so it
+	 * is unmapped automatically on unbind; an explicit iounmap() here
+	 * would unmap it a second time.
+	 */
+	free_netdev(dev);
+
+	return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2425,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int rhine_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rhine_private *rp = netdev_priv(dev);
 
 	if (!netif_running(dev))
@@ -2367,15 +2437,15 @@ static int rhine_suspend(struct device *device)
 
 	netif_device_detach(dev);
 
-	rhine_shutdown(pdev);
+	if (dev_is_pci(device))
+		rhine_shutdown_pci(to_pci_dev(device));
 
 	return 0;
 }
 
 static int rhine_resume(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rhine_private *rp = netdev_priv(dev);
 
 	if (!netif_running(dev))
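[Note: rhine_suspend()/rhine_resume() can take a bare struct device
because both probe paths store the same drvdata; pci_get_drvdata() is
itself just dev_get_drvdata() on &pdev->dev, roughly (quoted from
memory, worth verifying):

	static inline void *pci_get_drvdata(struct pci_dev *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}

Only the WOL/shutdown register programming stays PCI-specific, hence
the dev_is_pci() guard around rhine_shutdown_pci().]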
@@ -2408,15 +2478,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
 
 #endif /* !CONFIG_PM_SLEEP */
 
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
 	.name		= DRV_NAME,
 	.id_table	= rhine_pci_tbl,
-	.probe		= rhine_init_one,
-	.remove		= rhine_remove_one,
-	.shutdown	= rhine_shutdown,
+	.probe		= rhine_init_one_pci,
+	.remove		= rhine_remove_one_pci,
+	.shutdown	= rhine_shutdown_pci,
 	.driver.pm	= RHINE_PM_OPS,
 };
 
+static struct platform_driver rhine_driver_platform = {
+	.probe		= rhine_init_one_platform,
+	.remove		= rhine_remove_one_platform,
+	.driver = {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table	= rhine_of_tbl,
+		.pm	= RHINE_PM_OPS,
+	}
+};
+
 static struct dmi_system_id rhine_dmi_table[] __initdata = {
 	{
 		.ident = "EPIA-M",
@@ -2437,6 +2518,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
 
 static int __init rhine_init(void)
 {
+	int ret_pci, ret_platform;
+
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
 	pr_info("%s\n", version);
@@ -2449,13 +2532,19 @@ static int __init rhine_init(void)
 	else if (avoid_D3)
 		pr_info("avoid_D3 set\n");
 
-	return pci_register_driver(&rhine_driver);
+	ret_pci = pci_register_driver(&rhine_driver_pci);
+	ret_platform = platform_driver_register(&rhine_driver_platform);
+	if ((ret_pci < 0) && (ret_platform < 0))
+		return ret_pci;
+
+	return 0;
 }
 
 
 static void __exit rhine_cleanup(void)
 {
-	pci_unregister_driver(&rhine_driver);
+	platform_driver_unregister(&rhine_driver_platform);
+	pci_unregister_driver(&rhine_driver_pci);
 }
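[Usage note: with this in place, the driver binds to a device-tree node
like the following. The node is illustrative only -- the register base,
size and interrupt number are placeholders, not values taken from this
patch:

	ethernet@d8004000 {
		compatible = "via,vt8500-rhine";
		reg = <0xd8004000 0x100>;
		interrupts = <10>;
	};
]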