@@ -52,6 +52,51 @@
 #define MLX4_CQ_STATE_ARMED_SOL	( 6 << 8)
 #define MLX4_EQ_STATE_FIRED		(10 << 8)
 
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
+void mlx4_cq_tasklet_cb(unsigned long data)
+{
+	unsigned long flags;
+	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+	struct mlx4_cq *mcq, *temp;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+	list_splice_tail_init(&ctx->list, &ctx->process_list);
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
+		list_del_init(&mcq->tasklet_ctx.list);
+		mcq->tasklet_ctx.comp(mcq);
+		if (atomic_dec_and_test(&mcq->refcount))
+			complete(&mcq->free);
+		if (time_after(jiffies, end))
+			break;
+	}
+
+	if (!list_empty(&ctx->process_list))
+		tasklet_schedule(&ctx->task);
+}
+
+static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
+{
+	unsigned long flags;
+	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+	spin_lock_irqsave(&tasklet_ctx->lock, flags);
+	/* When migration of CQs between EQs is implemented, note
+	 * that this point must be synchronized. It is possible
+	 * that while a CQ is being migrated, completions from the
+	 * old EQ could still arrive.
+	 */
+	if (list_empty_careful(&cq->tasklet_ctx.list)) {
+		atomic_inc(&cq->refcount);
+		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+	}
+	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 {
 	struct mlx4_cq *cq;
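
Note: the struct mlx4_eq_tasklet context that mlx4_cq_tasklet_cb() drains is defined outside this excerpt (elsewhere in the series, in mlx4.h). A minimal sketch of the layout the code above assumes:

	struct mlx4_eq_tasklet {
		struct list_head list;		/* CQs queued by mlx4_add_cq_to_tasklet() */
		struct list_head process_list;	/* private working copy drained by the tasklet */
		struct tasklet_struct task;
		spinlock_t lock;		/* serializes list against the EQ interrupt path */
	};

Splicing list onto process_list under ctx->lock keeps the lock hold time short: the interrupt path only ever appends to list, while the tasklet iterates its private process_list with the lock dropped.
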
@@ -292,6 +337,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	cq->uar = uar;
 	atomic_set(&cq->refcount, 1);
 	init_completion(&cq->free);
+	cq->comp = mlx4_add_cq_to_tasklet;
+	cq->tasklet_ctx.priv =
+		&priv->eq_table.eq[cq->vector].tasklet_ctx;
+	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
+
 
 	cq->irq = priv->eq_table.eq[cq->vector].irq;
 	return 0;
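
Not shown in this excerpt is the EQ-side setup that arms the mechanism. Assuming each struct mlx4_eq gains a tasklet_ctx member (as this series does elsewhere, in eq.c), the per-EQ wiring would look roughly like:

	/* sketch only: per-EQ tasklet wiring, assuming eq->tasklet_ctx exists */
	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
		     (unsigned long)&eq->tasklet_ctx);

The EQ interrupt handler would then call tasklet_schedule(&eq->tasklet_ctx.task) after queuing completion events, deferring the per-CQ completion callbacks (cq->tasklet_ctx.comp) out of hard-IRQ context, while the TASKLET_MAX_TIME_JIFFIES budget (2 ms) makes the callback reschedule itself rather than monopolize the CPU in one run.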