@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -48,11 +48,11 @@
 #include "mlx5_core.h"
 
 #define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#define DRIVER_VERSION "3.0"
+#define DRIVER_RELDATE "January 2015"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
+MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -288,8 +288,6 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from)
 	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
 	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
 	MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
-	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
-	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
 	MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
 	MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
 	v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
@@ -509,6 +507,87 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
 	return 0;
 }
 
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_eq *eq, *n;
+	int err = -ENOENT;
+
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		if (eq->index == vector) {
+			*eqn = eq->eqn;
+			*irqn = eq->irqn;
+			err = 0;
+			break;
+		}
+	}
+	spin_unlock(&table->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
+
+static void free_comp_eqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_eq *eq, *n;
+
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		list_del(&eq->list);
+		spin_unlock(&table->lock);
+		if (mlx5_destroy_unmap_eq(dev, eq))
+			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
+				       eq->eqn);
+		kfree(eq);
+		spin_lock(&table->lock);
+	}
+	spin_unlock(&table->lock);
+}
+
+static int alloc_comp_eqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	char name[MLX5_MAX_EQ_NAME];
+	struct mlx5_eq *eq;
+	int ncomp_vec;
+	int nent;
+	int err;
+	int i;
+
+	INIT_LIST_HEAD(&table->comp_eqs_list);
+	ncomp_vec = table->num_comp_vectors;
+	nent = MLX5_COMP_EQ_SIZE;
+	for (i = 0; i < ncomp_vec; i++) {
+		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+		if (!eq) {
+			err = -ENOMEM;
+			goto clean;
+		}
+
+		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+		err = mlx5_create_map_eq(dev, eq,
+					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
+					 name, &dev->priv.uuari.uars[0]);
+		if (err) {
+			kfree(eq);
+			goto clean;
+		}
+		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
+		eq->index = i;
+		spin_lock(&table->lock);
+		list_add_tail(&eq->list, &table->comp_eqs_list);
+		spin_unlock(&table->lock);
+	}
+
+	return 0;
+
+clean:
+	free_comp_eqs(dev);
+	return err;
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -645,6 +724,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 		goto err_free_uar;
 	}
 
+	err = alloc_comp_eqs(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
+		goto err_stop_eqs;
+	}
+
 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
 	mlx5_init_cq_table(dev);
@@ -654,6 +739,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
 	return 0;
 
+err_stop_eqs:
+	mlx5_stop_eqs(dev);
+
 err_free_uar:
 	mlx5_free_uuars(dev, &priv->uuari);
 
@@ -705,6 +793,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
+	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
 	mlx5_eq_cleanup(dev);
@@ -819,6 +908,28 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+	struct mlx5_priv *priv = &mdev->priv;
+	struct mlx5_device_context *dev_ctx;
+	unsigned long flags;
+	void *result = NULL;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+		if ((dev_ctx->intf->protocol == protocol) &&
+		    dev_ctx->intf->get_dev) {
+			result = dev_ctx->intf->get_dev(dev_ctx->context);
+			break;
+		}
+
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
 static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			    unsigned long param)
 {
|