@@ -3621,24 +3621,21 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
 	mutex_unlock(&ctx->wq->mutex);
 }
 
-/**
- * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
- * @wq: the target workqueue
- * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
- *
- * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
- * machines, this function maps a separate pwq to each NUMA node with
- * possibles CPUs in @attrs->cpumask so that work items are affine to the
- * NUMA node it was issued on. Older pwqs are released as in-flight work
- * items finish. Note that a work item which repeatedly requeues itself
- * back-to-back will stay on its current pwq.
- *
- * Performs GFP_KERNEL allocations.
- *
- * Return: 0 on success and -errno on failure.
- */
-int apply_workqueue_attrs(struct workqueue_struct *wq,
-			  const struct workqueue_attrs *attrs)
+static void apply_wqattrs_lock(void)
+{
+	/* CPUs should stay stable across pwq creations and installations */
+	get_online_cpus();
+	mutex_lock(&wq_pool_mutex);
+}
+
+static void apply_wqattrs_unlock(void)
+{
+	mutex_unlock(&wq_pool_mutex);
+	put_online_cpus();
+}
+
+static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+					const struct workqueue_attrs *attrs)
 {
 	struct apply_wqattrs_ctx *ctx;
 	int ret = -ENOMEM;
@@ -3651,14 +3648,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
 		return -EINVAL;
 
-	/*
-	 * CPUs should stay stable across pwq creations and installations.
-	 * Pin CPUs, determine the target cpumask for each node and create
-	 * pwqs accordingly.
-	 */
-	get_online_cpus();
-	mutex_lock(&wq_pool_mutex);
-
 	ctx = apply_wqattrs_prepare(wq, attrs);
 
 	/* the ctx has been prepared successfully, let's commit it */
@@ -3667,14 +3656,39 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 		ret = 0;
 	}
 
-	mutex_unlock(&wq_pool_mutex);
-	put_online_cpus();
-
 	apply_wqattrs_cleanup(ctx);
 
 	return ret;
 }
 
+/**
+ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+ * @wq: the target workqueue
+ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+ *
+ * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
+ * machines, this function maps a separate pwq to each NUMA node with
+ * possible CPUs in @attrs->cpumask so that work items are affine to the
+ * NUMA node they were issued on. Older pwqs are released as in-flight work
+ * items finish. Note that a work item which repeatedly requeues itself
+ * back-to-back will stay on its current pwq.
+ *
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
+ */
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+			  const struct workqueue_attrs *attrs)
+{
+	int ret;
+
+	apply_wqattrs_lock();
+	ret = apply_workqueue_attrs_locked(wq, attrs);
+	apply_wqattrs_unlock();
+
+	return ret;
+}
+
 /**
  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
  * @wq: the target workqueue
@@ -4799,10 +4813,9 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
-	get_online_cpus();
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
-		mutex_lock(&wq_pool_mutex);
+		apply_wqattrs_lock();
 
 		/* save the old wq_unbound_cpumask. */
 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
@@ -4815,9 +4828,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 		if (ret < 0)
 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
-		mutex_unlock(&wq_pool_mutex);
+		apply_wqattrs_unlock();
 	}
-	put_online_cpus();
 
 	free_cpumask_var(saved_cpumask);
 	return ret;