@@ -3970,6 +3970,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
 	}
 }
 
+static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	int vport;
+
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return;
+
+	for (vport = 1; vport < total_vfs; vport++)
+		mlx5_eswitch_unregister_vport_rep(esw, vport);
+}
+
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4016,6 +4029,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
 		return err;
 	}
 
+	mlx5e_register_vport_rep(mdev);
 	return 0;
 }
 
@@ -4027,6 +4041,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
 	if (!netif_device_present(netdev))
 		return;
 
+	mlx5e_unregister_vport_rep(mdev);
 	mlx5e_detach_netdev(mdev, netdev);
 	mlx5e_destroy_mdev_resources(mdev);
 }
@@ -4045,8 +4060,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 	if (err)
 		return NULL;
 
-	mlx5e_register_vport_rep(mdev);
-
 	if (MLX5_CAP_GEN(mdev, vport_group_manager))
 		ppriv = &esw->offloads.vport_reps[0];
 
@@ -4098,13 +4111,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
 
 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 {
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	struct mlx5e_priv *priv = vpriv;
-	int vport;
-
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
 
 	unregister_netdev(priv->netdev);
 	mlx5e_detach(mdev, vpriv);