@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 	return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
 
 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state		= IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 	props->active_mtu	= IB_MTU_256;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
 		goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
 		ret = 1;
@@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
+		spin_unlock_bh(&mdev->iboe.lock);
 		if (ndev)
 			dev_put(ndev);
 		list_del(&ge->list);
@@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("reset gid table failed\n");
@@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		return 0;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 			update_gid_table(ibdev, port, gid,
 					 event == NETDEV_DOWN, 0);
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	return 0;
 }
 
@@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
 	read_unlock(&dev_base_lock);
 
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need for update QP1 and mac registration in non-SRIOV */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
 	qp = ibdev->qp1_proxy[port - 1];
 	if (qp) {
 		int new_smac_index;
-		u64 old_smac = qp->pri.smac;
+		u64 old_smac;
 		struct mlx4_update_qp_params update_params;
 
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
 		if (new_smac == old_smac)
 			goto unlock;
 
@@ -1680,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 			goto unlock;
 
 		update_params.smac_index = new_smac_index;
-		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
 				   &update_params)) {
 			release_mac = new_smac;
 			goto unlock;
 		}
-
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
 		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
 		qp->pri.smac_index = new_smac_index;
-
-		release_mac = old_smac;
 	}
 
 unlock:
-	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 	if (release_mac != MLX4_IB_INVALID_MAC)
 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 	struct inet6_dev *in6_dev;
 	union ib_gid  *pgid;
 	struct inet6_ifaddr *ifp;
+	union ib_gid default_gid;
 #endif
 	union ib_gid gid;
 
@@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 			in_dev_put(in_dev);
 		}
 #if IS_ENABLED(CONFIG_IPV6)
+	mlx4_make_default_gid(dev, &default_gid);
 	/* IPv6 gids */
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev) {
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
+			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+				continue;
 			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
@@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	struct net_device *dev;
 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 	int i;
+	int err = 0;
 
-	for (i = 1; i <= ibdev->num_ports; ++i)
-		if (reset_gid_table(ibdev, i))
-			return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i) {
+		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+		    IB_LINK_LAYER_ETHERNET) {
+			err = reset_gid_table(ibdev, i);
+			if (err)
+				goto out;
+		}
+	}
 
 	read_lock(&dev_base_lock);
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port)
+		/* port will be non-zero only for ETH ports */
+		if (port) {
+			mlx4_ib_set_default_gid(ibdev, dev, port);
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
+		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	read_unlock(&dev_base_lock);
-
-	return 0;
+out:
+	return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
 	iboe = &ibdev->iboe;
 
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		enum ib_port_state	port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
@@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
 						IB_PORT_ACTIVE : IB_PORT_DOWN;
 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		} else {
-			reset_gid_table(ibdev, port);
-		}
-		/* if using bonding/team and a slave port is down, we don't the bond IP
-		 * based gids in the table since flows that select port by gid may get
-		 * the down port.
-		 */
-		if (curr_master && (port_state == IB_PORT_DOWN)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		}
-		/* if bonding is used it is possible that we add it to masters
-		 * only after IP address is assigned to the net bonding
-		 * interface.
-		 */
-		if (curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-		}
+			if (curr_master) {
+				/* if using bonding/team and a slave port is down, we
+				 * don't want the bond IP based gids in the table since
+				 * flows that select port by gid may get the down port.
+				 */
+				if (port_state == IB_PORT_DOWN) {
+					reset_gid_table(ibdev, port);
+					mlx4_ib_set_default_gid(ibdev,
+								curr_netdev,
+								port);
+				} else {
+					/* gids from the upper dev (bond/team)
+					 * should appear in port's gid table
+					 */
+					mlx4_ib_get_dev_addr(curr_master,
+							     ibdev, port);
+				}
+			}
+			/* if bonding is used it is possible that we add it to
+			 * masters only after IP address is assigned to the
+			 * net bonding interface.
+			 */
+			if (curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+			}
 
-		if (!curr_master && (old_master != curr_master)) {
+			if (!curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			}
+		} else {
 			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
 		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 
 	if (update_qps_port > 0)
 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			goto err_steer_free_bitmap;
 	}
 
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
 	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_steer_free_bitmap;
 
@@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
-	for (i = 1 ; i <= ibdev->num_ports ; ++i)
-		reset_gid_table(ibdev, i);
-	rtnl_lock();
-	mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-	rtnl_unlock();
-	mlx4_ib_init_gid_table(ibdev);
+		if (mlx4_ib_init_gid_table(ibdev))
+			goto err_notif;
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
+
 	mlx4_ib_close_sriov(ibdev);
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);