@@ -2541,6 +2541,41 @@ static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
 	unregister_netdevice_notifier(&dev->roce.nb);
 }
 
+static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_ports; i++)
+		mlx5_core_dealloc_q_counter(dev->mdev,
+					    dev->port[i].q_cnt_id);
+}
+
+static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < dev->num_ports; i++) {
+		ret = mlx5_core_alloc_q_counter(dev->mdev,
+						&dev->port[i].q_cnt_id);
+		if (ret) {
+			mlx5_ib_warn(dev,
+				     "couldn't allocate queue counter for port %d, err %d\n",
+				     i + 1, ret);
+			goto dealloc_counters;
+		}
+	}
+
+	return 0;
+
+dealloc_counters:
+	while (--i >= 0)
+		mlx5_core_dealloc_q_counter(dev->mdev,
+					    dev->port[i].q_cnt_id);
+
+	return ret;
+}
+
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_ib_dev *dev;
@@ -2563,10 +2598,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
 	dev->mdev = mdev;
 
+	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+			    GFP_KERNEL);
+	if (!dev->port)
+		goto err_dealloc;
+
 	rwlock_init(&dev->roce.netdev_lock);
 	err = get_port_caps(dev);
 	if (err)
-		goto err_dealloc;
+		goto err_free_port;
 
 	if (mlx5_use_mad_ifc(dev))
 		get_ext_port_caps(dev);
@@ -2729,10 +2769,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (err)
 		goto err_rsrc;
 
-	err = ib_register_device(&dev->ib_dev, NULL);
+	err = mlx5_ib_alloc_q_counters(dev);
 	if (err)
 		goto err_odp;
 
+	err = ib_register_device(&dev->ib_dev, NULL);
+	if (err)
+		goto err_q_cnt;
+
 	err = create_umr_res(dev);
 	if (err)
 		goto err_dev;
@@ -2754,6 +2798,9 @@ err_umrc:
 err_dev:
 	ib_unregister_device(&dev->ib_dev);
 
+err_q_cnt:
+	mlx5_ib_dealloc_q_counters(dev);
+
 err_odp:
 	mlx5_ib_odp_remove_one(dev);
 
@@ -2764,6 +2811,9 @@ err_disable_roce:
 	if (ll == IB_LINK_LAYER_ETHERNET)
 		mlx5_disable_roce(dev);
 
+err_free_port:
+	kfree(dev->port);
+
 err_dealloc:
 	ib_dealloc_device((struct ib_device *)dev);
 
@@ -2776,11 +2826,13 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
 	ib_unregister_device(&dev->ib_dev);
+	mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);
 	mlx5_ib_odp_remove_one(dev);
 	destroy_dev_resources(&dev->devr);
 	if (ll == IB_LINK_LAYER_ETHERNET)
 		mlx5_disable_roce(dev);
+	kfree(dev->port);
 	ib_dealloc_device(&dev->ib_dev);
 }