@@ -3556,6 +3556,14 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
 
 	return netdev;
 }
 
+const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
+						  int comp_vector)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+}
+
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_ib_dev *dev;
@@ -3686,6 +3694,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
 	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
 	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
+	dev->ib_dev.get_vector_affinity	= mlx5_ib_get_vector_affinity;
 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
 		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
 
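
Note: ULPs are not expected to call the driver hook directly; the callback wired up above is normally reached through the ib_get_vector_affinity() helper in include/rdma/ib_verbs.h, which returns NULL when the vector is out of range or the driver does not provide the hook. Below is a minimal sketch of how a consumer might use it; ulp_pick_comp_vector() and its CPU-matching policy are hypothetical, only the ib_verbs.h/cpumask calls are existing API.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: pick a completion vector whose IRQ affinity mask
 * covers @cpu, falling back to simple round-robin when no affinity
 * information is available.  ib_get_vector_affinity() and
 * num_comp_vectors come from ib_verbs.h; the policy itself is only
 * illustrative.
 */
static int ulp_pick_comp_vector(struct ib_device *ibdev, unsigned int cpu)
{
	int vec;

	for (vec = 0; vec < ibdev->num_comp_vectors; vec++) {
		const struct cpumask *mask;

		mask = ib_get_vector_affinity(ibdev, vec);
		if (mask && cpumask_test_cpu(cpu, mask))
			return vec;
	}

	/* No affinity information (or no match): round-robin fallback. */
	return cpu % ibdev->num_comp_vectors;
}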