@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 {
 	struct mlx4_cq *cq;

+	rcu_read_lock();
 	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
 			       cqn & (dev->caps.num_cqs - 1));
+	rcu_read_unlock();
+
 	if (!cq) {
 		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
 		return;
 	}

+	/* Accessing the CQ outside of rcu_read_lock is safe, because
+	 * the CQ is freed only after interrupt handling is completed.
+	 */
 	++cq->arm_sn;

 	cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
 	struct mlx4_cq *cq;

-	spin_lock(&cq_table->lock);
-
+	rcu_read_lock();
 	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-
-	spin_unlock(&cq_table->lock);
+	rcu_read_unlock();

 	if (!cq) {
-		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
 		return;
 	}

+	/* Accessing the CQ outside of rcu_read_lock is safe, because
+	 * the CQ is freed only after interrupt handling is completed.
+	 */
 	cq->event(cq, event_type);
-
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
 }

 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	if (err)
 		return err;

-	spin_lock_irq(&cq_table->lock);
+	spin_lock(&cq_table->lock);
 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
-	spin_unlock_irq(&cq_table->lock);
+	spin_unlock(&cq_table->lock);
 	if (err)
 		goto err_icm;

@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	return 0;

 err_radix:
-	spin_lock_irq(&cq_table->lock);
+	spin_lock(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
+	spin_unlock(&cq_table->lock);

 err_icm:
 	mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 	if (err)
 		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

+	spin_lock(&cq_table->lock);
+	radix_tree_delete(&cq_table->tree, cq->cqn);
+	spin_unlock(&cq_table->lock);
+
 	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
 	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
 	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
 		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

-	spin_lock_irq(&cq_table->lock);
-	radix_tree_delete(&cq_table->tree, cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
-
 	if (atomic_dec_and_test(&cq->refcount))
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
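
For readers following the locking change: the patch replaces a refcounted, spinlocked lookup in interrupt context with a plain RCU read-side lookup, and makes teardown unpublish the CQ from the radix tree before synchronize_irq(), so that once synchronize_irq() returns, no interrupt handler can still hold a pointer to the CQ. The sketch below distills that pattern outside of mlx4; it is illustrative only, and all names in it (my_tree, my_lock, my_obj, my_lookup, my_remove) are hypothetical, not part of the driver.

/* Illustrative sketch of the lookup/teardown pattern the diff above
 * converts to; not part of the patch.
 */
#include <linux/interrupt.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_obj {
	u32 id;
};

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* updates serialized by my_lock */
static DEFINE_SPINLOCK(my_lock);

/* Reader side, runs in IRQ context: RCU alone protects the lookup. */
static struct my_obj *my_lookup(u32 id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = radix_tree_lookup(&my_tree, id);
	rcu_read_unlock();

	/* Using obj after rcu_read_unlock() is safe only because
	 * my_remove() deletes it from the tree and then waits for
	 * in-flight interrupt handlers before freeing it.
	 */
	return obj;
}

/* Teardown: unpublish first, then wait out the IRQ handler. */
static void my_remove(struct my_obj *obj, unsigned int irq)
{
	spin_lock(&my_lock);
	radix_tree_delete(&my_tree, obj->id);
	spin_unlock(&my_lock);

	synchronize_irq(irq);	/* no handler can still see obj */
	kfree(obj);
}

This reordering is also why the spin_lock_irq()/spin_unlock_irq() pairs in the diff become plain spin_lock()/spin_unlock(): once the interrupt path no longer takes cq_table->lock, the lock does not need to disable interrupts around tree updates.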