@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq)
 	if (WARN_ON(!wq_online))
 		return;
 
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
+
 	mutex_lock(&wq->mutex);
 
 	/*
@@ -2843,7 +2846,8 @@ reflush:
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool from_cancel)
 {
 	struct worker *worker = NULL;
 	struct worker_pool *pool;
@@ -2885,7 +2889,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	 * workqueues the deadlock happens when the rescuer stalls, blocking
 	 * forward progress.
 	 */
-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
+	if (!from_cancel &&
+	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
 		lock_map_acquire(&pwq->wq->lockdep_map);
 		lock_map_release(&pwq->wq->lockdep_map);
 	}
@@ -2896,6 +2901,27 @@ already_gone:
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work, bool from_cancel)
+{
+	struct wq_barrier barr;
+
+	if (WARN_ON(!wq_online))
+		return false;
+
+	if (!from_cancel) {
+		lock_map_acquire(&work->lockdep_map);
+		lock_map_release(&work->lockdep_map);
+	}
+
+	if (start_flush_work(work, &barr, from_cancel)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2909,18 +2935,7 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
-	if (WARN_ON(!wq_online))
-		return false;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work, false);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -2986,7 +3001,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	 * isn't executing.
 	 */
 	if (wq_online)
-		flush_work(work);
+		__flush_work(work, true);
 
 	clear_work_data(work);
 
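For context (illustration only, not part of the patch): the dummy
lock_map_acquire()/lock_map_release() pairs above teach lockdep that
flushing depends on the workqueue and on the work item, so a self-flush
deadlock is reported at the annotation site instead of hanging silently
at runtime. Below is a minimal sketch of that bug class, assuming a
kernel module context; all demo_* names are hypothetical.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical demo module; it intentionally deadlocks. */
static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/*
	 * Deadlock: flush_workqueue() waits for demo_work_fn() itself
	 * to finish.  The worker holds wq->lockdep_map while running
	 * this item, and flush_workqueue() now acquires/releases the
	 * same map, so lockdep reports the cycle rather than letting
	 * the machine hang here.
	 */
	flush_workqueue(demo_wq);
}

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");

cancel_work_sync() instead takes the __flush_work(work, true) path,
which skips both dummy dependencies: a cancel only ever waits for the
single running instance of @work, never for the rest of the queue, so
the workqueue-level dependency would produce false positives, e.g. when
canceling one work item from inside another on the same ordered
workqueue.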