@@ -1804,8 +1804,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;

-	spin_lock_irq(&wq_mayday_lock);	/* for wq->maydays */
-	spin_lock(&pool->lock);
+	spin_lock_irq(&pool->lock);
+	spin_lock(&wq_mayday_lock);	/* for wq->maydays */

 	if (need_to_create_worker(pool)) {
 		/*
@@ -1818,8 +1818,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 			send_mayday(work);
 	}

-	spin_unlock(&pool->lock);
-	spin_unlock_irq(&wq_mayday_lock);
+	spin_unlock(&wq_mayday_lock);
+	spin_unlock_irq(&pool->lock);

 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
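
Note: the two hunks above invert the nesting of pool->lock and
wq_mayday_lock. The rescuer hunk below acquires wq_mayday_lock while
already holding pool->lock, so pool->lock must become the outer lock in
pool_mayday_timeout() as well; otherwise the two paths would form an
ABBA deadlock. A minimal sketch of the resulting lock order (an
illustration only, not part of the patch):

	spin_lock_irq(&pool->lock);	/* outer lock, hardirqs disabled */
	spin_lock(&wq_mayday_lock);	/* inner lock, guards wq->maydays */
	/* ... inspect pool->worklist, manipulate wq->maydays ... */
	spin_unlock(&wq_mayday_lock);
	spin_unlock_irq(&pool->lock);

Any code path that nests these two locks in the opposite order would
deadlock against this one.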
@@ -2248,12 +2248,30 @@ repeat:
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
-		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
+		WARN_ON_ONCE(!list_empty(scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry)
			if (get_work_pwq(work) == pwq)
				move_linked_works(work, scheduled, &n);

-		process_scheduled_works(rescuer);
+		if (!list_empty(scheduled)) {
+			process_scheduled_works(rescuer);
+
+			/*
+			 * The above execution of rescued work items could
+			 * have created more to rescue through
+			 * pwq_activate_first_delayed() or chained
+			 * queueing. Let's put @pwq back on mayday list so
+			 * that such back-to-back work items, which may be
+			 * being used to relieve memory pressure, don't
+			 * incur MAYDAY_INTERVAL delay inbetween.
+			 */
+			if (need_to_create_worker(pool)) {
+				spin_lock(&wq_mayday_lock);
+				get_pwq(pwq);
+				list_move_tail(&pwq->mayday_node, &wq->maydays);
+				spin_unlock(&wq_mayday_lock);
+			}
+		}

		/*
		 * Put the reference grabbed by send_mayday(). @pool won't