@@ -4980,6 +4980,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
+	/*
+	 * Not excluding isolated cpus on purpose.
+	 * If the user wishes to include them, we allow that.
+	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
@@ -5579,7 +5583,7 @@ int __init workqueue_init_early(void)
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+	cpumask_andnot(wq_unbound_cpumask, cpu_possible_mask, cpu_isolated_map);
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
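For context on the second hunk: cpumask_andnot(dst, a, b) computes dst = a & ~b, so wq_unbound_cpumask now boots as the possible CPUs minus the isolated ones, while the first hunk deliberately leaves the sysfs write path (workqueue_set_unbound_cpumask) free to re-include isolated CPUs. A minimal userspace sketch of that mask arithmetic follows; the bit values and variable names are illustrative stand-ins, not the kernel cpumask API:

#include <stdio.h>

int main(void)
{
	/* Illustrative stand-ins for the kernel cpumasks, one bit per CPU. */
	unsigned long cpu_possible = 0xff;	/* CPUs 0-7 present */
	unsigned long cpu_isolated = 0x0c;	/* CPUs 2-3 isolated, e.g. isolcpus=2,3 */

	/* What cpumask_andnot(wq_unbound_cpumask, cpu_possible_mask,
	 * cpu_isolated_map) computes at boot: possible & ~isolated. */
	unsigned long wq_unbound = cpu_possible & ~cpu_isolated;

	printf("wq_unbound_cpumask = 0x%lx\n", wq_unbound);	/* 0xf3: CPUs 0-1,4-7 */
	return 0;
}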