|
|
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
|
|
|
list_add_tail(&delayed_event->list, &priv->waiting_events_list);
|
|
|
}
|
|
|
|
|
|
-static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
|
|
|
- struct mlx5_core_dev *dev,
|
|
|
- struct mlx5_priv *priv)
|
|
|
+static void delayed_event_release(struct mlx5_device_context *dev_ctx,
|
|
|
+ struct mlx5_priv *priv)
|
|
|
{
|
|
|
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
|
|
|
struct mlx5_delayed_event *de;
|
|
|
struct mlx5_delayed_event *n;
|
|
|
+ struct list_head temp;
|
|
|
|
|
|
- /* stop delaying events */
|
|
|
- priv->is_accum_events = false;
|
|
|
+ INIT_LIST_HEAD(&temp);
|
|
|
+
|
|
|
+ spin_lock_irq(&priv->ctx_lock);
|
|
|
|
|
|
- /* fire all accumulated events before new event comes */
|
|
|
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
|
|
|
+ priv->is_accum_events = false;
|
|
|
+ list_splice_init(&priv->waiting_events_list, &temp);
|
|
|
+ if (!dev_ctx->context)
|
|
|
+ goto out;
|
|
|
+ list_for_each_entry_safe(de, n, &temp, list)
|
|
|
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
|
|
|
+
|
|
|
+out:
|
|
|
+ spin_unlock_irq(&priv->ctx_lock);
|
|
|
+
|
|
|
+ list_for_each_entry_safe(de, n, &temp, list) {
|
|
|
list_del(&de->list);
|
|
|
kfree(de);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static void cleanup_delayed_evets(struct mlx5_priv *priv)
|
|
|
+/* accumulating events that can come after mlx5_ib calls to
|
|
|
+ * ib_register_device, till adding that interface to the events list.
|
|
|
+ */
|
|
|
+static void delayed_event_start(struct mlx5_priv *priv)
|
|
|
{
|
|
|
- struct mlx5_delayed_event *de;
|
|
|
- struct mlx5_delayed_event *n;
|
|
|
-
|
|
|
spin_lock_irq(&priv->ctx_lock);
|
|
|
- priv->is_accum_events = false;
|
|
|
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
|
|
|
- list_del(&de->list);
|
|
|
- kfree(de);
|
|
|
- }
|
|
|
+ priv->is_accum_events = true;
|
|
|
spin_unlock_irq(&priv->ctx_lock);
|
|
|
}
|
|
|
|
|
|
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
|
|
|
return;
|
|
|
|
|
|
dev_ctx->intf = intf;
|
|
|
- /* accumulating events that can come after mlx5_ib calls to
|
|
|
- * ib_register_device, till adding that interface to the events list.
|
|
|
- */
|
|
|
|
|
|
- priv->is_accum_events = true;
|
|
|
+ delayed_event_start(priv);
|
|
|
|
|
|
dev_ctx->context = intf->add(dev);
|
|
|
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
|
|
|
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
|
|
|
spin_lock_irq(&priv->ctx_lock);
|
|
|
list_add_tail(&dev_ctx->list, &priv->ctx_list);
|
|
|
|
|
|
- fire_delayed_event_locked(dev_ctx, dev, priv);
|
|
|
-
|
|
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
|
|
if (dev_ctx->intf->pfault) {
|
|
|
if (priv->pfault) {
|
|
|
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
|
|
|
}
|
|
|
#endif
|
|
|
spin_unlock_irq(&priv->ctx_lock);
|
|
|
- } else {
|
|
|
- kfree(dev_ctx);
|
|
|
- /* delete all accumulated events */
|
|
|
- cleanup_delayed_evets(priv);
|
|
|
}
|
|
|
+
|
|
|
+ delayed_event_release(dev_ctx, priv);
|
|
|
+
|
|
|
+ if (!dev_ctx->context)
|
|
|
+ kfree(dev_ctx);
|
|
|
}
|
|
|
|
|
|
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
|
|
|
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
|
|
|
if (!dev_ctx)
|
|
|
return;
|
|
|
|
|
|
+ delayed_event_start(priv);
|
|
|
if (intf->attach) {
|
|
|
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
|
|
|
- return;
|
|
|
+ goto out;
|
|
|
intf->attach(dev, dev_ctx->context);
|
|
|
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
|
|
|
} else {
|
|
|
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
|
|
|
- return;
|
|
|
+ goto out;
|
|
|
dev_ctx->context = intf->add(dev);
|
|
|
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
|
|
|
}
|
|
|
+
|
|
|
+out:
|
|
|
+ delayed_event_release(dev_ctx, priv);
|
|
|
}
|
|
|
|
|
|
void mlx5_attach_device(struct mlx5_core_dev *dev)
|
|
|
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
|
|
|
if (priv->is_accum_events)
|
|
|
add_delayed_event(priv, dev, event, param);
|
|
|
|
|
|
+ /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
|
|
|
+ * still in priv->ctx_list. In this case, only notify the dev_ctx if its
|
|
|
+ * ADDED or ATTACHED bit is set.
|
|
|
+ */
|
|
|
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
|
|
|
- if (dev_ctx->intf->event)
|
|
|
+ if (dev_ctx->intf->event &&
|
|
|
+ (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
|
|
|
+ test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
|
|
|
dev_ctx->intf->event(dev, dev_ctx->context, event, param);
|
|
|
|
|
|
spin_unlock_irqrestore(&priv->ctx_lock, flags);
|