@@ -1603,70 +1603,6 @@ static void worker_leave_idle(struct worker *worker)
         list_del_init(&worker->entry);
 }
 
-/**
- * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
- * @pool: target worker_pool
- *
- * Bind %current to the cpu of @pool if it is associated and lock @pool.
- *
- * Works which are scheduled while the cpu is online must at least be
- * scheduled to a worker which is bound to the cpu so that if they are
- * flushed from cpu callbacks while cpu is going down, they are
- * guaranteed to execute on the cpu.
- *
- * This function is to be used by unbound workers and rescuers to bind
- * themselves to the target cpu and may race with cpu going down or
- * coming online. kthread_bind() can't be used because it may put the
- * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
- * verbatim as it's best effort and blocking and pool may be
- * [dis]associated in the meantime.
- *
- * This function tries set_cpus_allowed() and locks pool and verifies the
- * binding against %POOL_DISASSOCIATED which is set during
- * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
- * enters idle state or fetches works without dropping lock, it can
- * guarantee the scheduling requirement described in the first paragraph.
- *
- * CONTEXT:
- * Might sleep. Called without any lock but returns with pool->lock
- * held.
- *
- * Return:
- * %true if the associated pool is online (@worker is successfully
- * bound), %false if offline.
- */
-static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
-__acquires(&pool->lock)
-{
-        while (true) {
-                /*
-                 * The following call may fail, succeed or succeed
-                 * without actually migrating the task to the cpu if
-                 * it races with cpu hotunplug operation. Verify
-                 * against POOL_DISASSOCIATED.
-                 */
-                if (!(pool->flags & POOL_DISASSOCIATED))
-                        set_cpus_allowed_ptr(current, pool->attrs->cpumask);
-
-                spin_lock_irq(&pool->lock);
-                if (pool->flags & POOL_DISASSOCIATED)
-                        return false;
-                if (task_cpu(current) == pool->cpu &&
-                    cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
-                        return true;
-                spin_unlock_irq(&pool->lock);
-
-                /*
-                 * We've raced with CPU hot[un]plug. Give it a breather
-                 * and retry migration. cond_resched() is required here;
-                 * otherwise, we might deadlock against cpu_stop trying to
-                 * bring down the CPU on non-preemptive kernel.
-                 */
-                cpu_relax();
-                cond_resched();
-        }
-}
-
 static struct worker *alloc_worker(void)
 {
         struct worker *worker;
@@ -2361,8 +2297,9 @@ repeat:
 
                 spin_unlock_irq(&wq_mayday_lock);
 
-                /* migrate to the target cpu if possible */
-                worker_maybe_bind_and_lock(pool);
+                worker_attach_to_pool(rescuer, pool);
+
+                spin_lock_irq(&pool->lock);
                 rescuer->pool = pool;
 
                 /*
@@ -2375,6 +2312,11 @@ repeat:
                                 move_linked_works(work, scheduled, &n);
 
                 process_scheduled_works(rescuer);
+                spin_unlock_irq(&pool->lock);
+
+                worker_detach_from_pool(rescuer, pool);
+
+                spin_lock_irq(&pool->lock);
 
                 /*
                  * Put the reference grabbed by send_mayday(). @pool won't