@@ -45,11 +45,70 @@ struct mlx5_device_context {
 	unsigned long state;
 };
 
+struct mlx5_delayed_event {
+	struct list_head list;
+	struct mlx5_core_dev *dev;
+	enum mlx5_dev_event event;
+	unsigned long param;
+};
+
 enum {
 	MLX5_INTERFACE_ADDED,
 	MLX5_INTERFACE_ATTACHED,
 };
 
+static void add_delayed_event(struct mlx5_priv *priv,
+			      struct mlx5_core_dev *dev,
+			      enum mlx5_dev_event event,
+			      unsigned long param)
+{
+	struct mlx5_delayed_event *delayed_event;
+
+	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
+	if (!delayed_event) {
+		mlx5_core_err(dev, "event %d is missed\n", event);
+		return;
+	}
+
+	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
+	delayed_event->dev = dev;
+	delayed_event->event = event;
+	delayed_event->param = param;
+	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
+}
+
+static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
+				      struct mlx5_core_dev *dev,
+				      struct mlx5_priv *priv)
+{
+	struct mlx5_delayed_event *de;
+	struct mlx5_delayed_event *n;
+
+	/* stop delaying events */
+	priv->is_accum_events = false;
+
+	/* fire all accumulated events before new event comes */
+	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
+		list_del(&de->list);
+		kfree(de);
+	}
+}
+
+static void cleanup_delayed_events(struct mlx5_priv *priv)
+{
+	struct mlx5_delayed_event *de;
+	struct mlx5_delayed_event *n;
+
+	spin_lock_irq(&priv->ctx_lock);
+	priv->is_accum_events = false;
+	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+		list_del(&de->list);
+		kfree(de);
+	}
+	spin_unlock_irq(&priv->ctx_lock);
+}
+
 void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 {
 	struct mlx5_device_context *dev_ctx;
@@ -63,6 +122,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		return;
 
 	dev_ctx->intf = intf;
+	/* Accumulate events that can arrive after mlx5_ib calls
+	 * ib_register_device(), until this interface is added to the
+	 * events list.
+	 */
+
+	priv->is_accum_events = true;
+
 	dev_ctx->context = intf->add(dev);
 	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
 	if (intf->attach)
@@ -71,6 +136,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	if (dev_ctx->context) {
 		spin_lock_irq(&priv->ctx_lock);
 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
+
+		fire_delayed_event_locked(dev_ctx, dev, priv);
+
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 		if (dev_ctx->intf->pfault) {
 			if (priv->pfault) {
@@ -84,6 +152,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		spin_unlock_irq(&priv->ctx_lock);
 	} else {
 		kfree(dev_ctx);
+		/* delete all accumulated events */
+		cleanup_delayed_events(priv);
 	}
 }
 
@@ -341,6 +411,9 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 
 	spin_lock_irqsave(&priv->ctx_lock, flags);
 
+	if (priv->is_accum_events)
+		add_delayed_event(priv, dev, event, param);
+
 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
 		if (dev_ctx->intf->event)
 			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
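
The core of the change is an accumulate-and-replay queue: while priv->is_accum_events is set, incoming device events are copied onto waiting_events_list, and once the interface finishes registering, fire_delayed_event_locked() replays them to the newly added context. Below is a minimal, userspace-only sketch of that pattern in plain C (standard library only). It is not the mlx5 control flow, and all demo_* names are illustrative, not part of the driver.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the mlx5 event type and per-device private data. */
enum demo_event { DEMO_EVENT_PORT_UP, DEMO_EVENT_PORT_DOWN };

struct demo_delayed_event {
	enum demo_event event;
	unsigned long param;
	struct demo_delayed_event *next;
};

struct demo_priv {
	bool is_accum_events;                   /* true until a consumer is attached */
	struct demo_delayed_event *head, *tail; /* FIFO of queued events */
	void (*handler)(enum demo_event, unsigned long); /* NULL until registration ends */
};

/* Queue an event while no handler is attached (role of add_delayed_event()). */
static void demo_add_delayed_event(struct demo_priv *priv,
				   enum demo_event event, unsigned long param)
{
	struct demo_delayed_event *de = calloc(1, sizeof(*de));

	if (!de) {
		fprintf(stderr, "event %d is missed\n", event);
		return;
	}
	de->event = event;
	de->param = param;
	if (priv->tail)
		priv->tail->next = de;
	else
		priv->head = de;
	priv->tail = de;
}

/* Deliver an event: queue it while accumulating, otherwise call the handler. */
static void demo_core_event(struct demo_priv *priv,
			    enum demo_event event, unsigned long param)
{
	if (priv->is_accum_events) {
		demo_add_delayed_event(priv, event, param);
		return;
	}
	if (priv->handler)
		priv->handler(event, param);
}

/* Attach the handler and replay queued events in arrival order
 * (role of fire_delayed_event_locked()).
 */
static void demo_attach_handler(struct demo_priv *priv,
				void (*handler)(enum demo_event, unsigned long))
{
	struct demo_delayed_event *de = priv->head, *next;

	priv->handler = handler;
	priv->is_accum_events = false;
	for (; de; de = next) {
		next = de->next;
		handler(de->event, de->param);
		free(de);
	}
	priv->head = priv->tail = NULL;
}

static void demo_print_event(enum demo_event event, unsigned long param)
{
	printf("event %d, param %lu\n", event, param);
}

int main(void)
{
	struct demo_priv priv = { .is_accum_events = true };

	/* Events raised before the handler exists are not lost... */
	demo_core_event(&priv, DEMO_EVENT_PORT_UP, 1);
	demo_core_event(&priv, DEMO_EVENT_PORT_DOWN, 2);

	/* ...they are replayed once the handler is finally attached. */
	demo_attach_handler(&priv, demo_print_event);
	return 0;
}

Unlike the patch, which keeps no handler pointer and also delivers each new event to interfaces already on ctx_list, this sketch collapses everything into a single handler so the queue-then-replay pattern stays visible.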