@@ -435,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
-extern void flush_scheduled_work(void);
 
 extern int schedule_on_each_cpu(work_func_t func);
 
@@ -531,6 +530,35 @@ static inline bool schedule_work(struct work_struct *work)
 	return queue_work(system_wq, work);
 }
 
+/**
+ * flush_scheduled_work - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the kernel-global workqueue and blocks until its
+ * completion.
+ *
+ * Think twice before calling this function! It's very easy to get into
+ * trouble if you don't take great care. Either of the following situations
+ * will lead to deadlock:
+ *
+ *	One of the work items currently on the workqueue needs to acquire
+ *	a lock held by your code or its caller.
+ *
+ *	Your code is running in the context of a work routine.
+ *
+ * They will be detected by lockdep when they occur, but the first might not
+ * occur very often. It depends on what work items are on the workqueue and
+ * what locks they need, which you have no control over.
+ *
+ * In most situations flushing the entire workqueue is overkill; you merely
+ * need to know that a particular work item isn't queued and isn't running.
+ * In such cases you should use cancel_delayed_work_sync() or
+ * cancel_work_sync() instead.
+ */
+static inline void flush_scheduled_work(void)
+{
+	flush_workqueue(system_wq);
+}
+
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use