@@ -1262,15 +1262,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-	if (ipoib_neigh_hash_init(priv) < 0)
-		goto out;
 	/* Allocate RX/TX "rings" to hold queued skbs */
 	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
 				GFP_KERNEL);
 	if (!priv->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
 		       ca->name, ipoib_recvq_size);
-		goto out_neigh_hash_cleanup;
+		goto out;
 	}
 
 	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1285,16 +1283,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	if (ipoib_ib_dev_init(dev, ca, port))
 		goto out_tx_ring_cleanup;
 
+	/*
+	 * Must be after ipoib_ib_dev_init so we can allocate a per
+	 * device wq there and use it here
+	 */
+	if (ipoib_neigh_hash_init(priv) < 0)
+		goto out_dev_uninit;
+
 	return 0;
 
+out_dev_uninit:
+	ipoib_ib_dev_cleanup(dev);
+
 out_tx_ring_cleanup:
 	vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
 	kfree(priv->rx_ring);
 
-out_neigh_hash_cleanup:
-	ipoib_neigh_hash_uninit(dev);
 out:
 	return -ENOMEM;
 }
@@ -1317,6 +1323,12 @@ void ipoib_dev_cleanup(struct net_device *dev)
 	}
 	unregister_netdevice_many(&head);
 
+	/*
+	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
+	 * work queue
+	 */
+	ipoib_neigh_hash_uninit(dev);
+
 	ipoib_ib_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1324,8 +1336,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
 	priv->rx_ring = NULL;
 	priv->tx_ring = NULL;
-
-	ipoib_neigh_hash_uninit(dev);
 }
 
 static const struct header_ops ipoib_header_ops = {