@@ -185,6 +185,7 @@ static void health_care(struct work_struct *work)
 	struct mlx5_core_health *health;
 	struct mlx5_core_dev *dev;
 	struct mlx5_priv *priv;
+	unsigned long flags;
 
 	health = container_of(work, struct mlx5_core_health, work);
 	priv = container_of(health, struct mlx5_priv, health);
@@ -192,13 +193,13 @@ static void health_care(struct work_struct *work)
 	mlx5_core_warn(dev, "handling bad device here\n");
 	mlx5_handle_bad_state(dev);
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
 		schedule_delayed_work(&health->recover_work, recover_delay);
 	else
 		dev_err(&dev->pdev->dev,
 			"new health works are not permitted at this stage\n");
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 }
 
 static const char *hsynd_str(u8 synd)
@@ -269,6 +270,20 @@ static unsigned long get_next_poll_jiffies(void)
 	return next;
 }
 
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	spin_lock_irqsave(&health->wq_lock, flags);
+	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+		queue_work(health->wq, &health->work);
+	else
+		dev_err(&dev->pdev->dev,
+			"new health works are not permitted at this stage\n");
+	spin_unlock_irqrestore(&health->wq_lock, flags);
+}
+
 static void poll_health(unsigned long data)
 {
 	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
@@ -297,13 +312,7 @@ static void poll_health(unsigned long data)
 	if (in_fatal(dev) && !health->sick) {
 		health->sick = true;
 		print_health_info(dev);
-		spin_lock(&health->wq_lock);
-		if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
-			queue_work(health->wq, &health->work);
-		else
-			dev_err(&dev->pdev->dev,
-				"new health works are not permitted at this stage\n");
-		spin_unlock(&health->wq_lock);
+		mlx5_trigger_health_work(dev);
 	}
 }
 
@@ -333,10 +342,11 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 	cancel_delayed_work_sync(&health->recover_work);
 	cancel_work_sync(&health->work);
 }