@@ -5495,8 +5495,6 @@ int __init workqueue_init_early(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	wq_numa_init();
-
 	/* initialize CPU pools */
 	for_each_possible_cpu(cpu) {
 		struct worker_pool *pool;
@@ -5566,9 +5564,32 @@ int __init workqueue_init_early(void)
  */
 int __init workqueue_init(void)
 {
+	struct workqueue_struct *wq;
 	struct worker_pool *pool;
 	int cpu, bkt;
 
+	/*
+	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
+	 * CPU to node mapping may not be available that early on some
+	 * archs such as power and arm64. As per-cpu pools created
+	 * previously could be missing node hints, and unbound pools could
+	 * be missing NUMA affinity, fix them up here.
+	 */
+	wq_numa_init();
+
+	mutex_lock(&wq_pool_mutex);
+
+	for_each_possible_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->node = cpu_to_node(cpu);
+		}
+	}
+
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, smp_processor_id(), true);
+
+	mutex_unlock(&wq_pool_mutex);
+
 	/* create the initial workers */
 	for_each_online_cpu(cpu) {
 		for_each_cpu_worker_pool(pool, cpu) {
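
The hunk above is the whole fix: wq_numa_init() moves out of workqueue_init_early(), and workqueue_init() then walks the per-cpu pools under wq_pool_mutex to install the node hints that could not be known earlier. To make the two-phase pattern concrete ("create with a placeholder node early, fix up the hint once the CPU-to-node mapping works"), here is a minimal user-space sketch; every name in it (sketch_pool, sketch_cpu_to_node(), and so on) is an illustrative stand-in invented for this example, not a kernel definition.

/*
 * Minimal user-space sketch of the deferred NUMA fix-up pattern.
 * All identifiers here are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>

#define SKETCH_NR_CPUS	4
#define SKETCH_NO_NODE	(-1)	/* placeholder, like NUMA_NO_NODE */

struct sketch_pool {
	int cpu;
	int node;	/* NUMA node hint */
};

static struct sketch_pool pools[SKETCH_NR_CPUS];
static int topology_ready;	/* flips to true "later in boot" */

/* Stand-in for cpu_to_node(): returns nothing useful until the
 * topology has been parsed, mimicking power/arm64 early boot. */
static int sketch_cpu_to_node(int cpu)
{
	return topology_ready ? cpu / 2 : SKETCH_NO_NODE;
}

/* Early phase: pools must exist, but node hints can't be trusted yet,
 * so every pool gets the placeholder. */
static void sketch_init_early(void)
{
	for (int cpu = 0; cpu < SKETCH_NR_CPUS; cpu++) {
		pools[cpu].cpu = cpu;
		pools[cpu].node = SKETCH_NO_NODE;
	}
}

/* Late phase: the mapping is available now, so walk every pool and
 * overwrite the placeholder, as the patch does under wq_pool_mutex. */
static void sketch_init_late(void)
{
	topology_ready = 1;
	for (int cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)
		pools[cpu].node = sketch_cpu_to_node(cpu);
}

int main(void)
{
	sketch_init_early();
	sketch_init_late();
	for (int cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)
		printf("cpu %d -> node %d\n", cpu, pools[cpu].node);
	return 0;
}

The kernel version needs the extra wq_update_unbound_numa() pass because unbound workqueues cache NUMA-affine pool_workqueues per node; once real node numbers exist, those cached mappings have to be rebuilt as well, which is why the patch iterates the global workqueues list under the same mutex.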