@@ -718,6 +718,19 @@ kthread_create_worker_on_cpu(int cpu, const char namefmt[], ...)
 }
 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 
+/*
+ * Returns true when the work could not be queued at the moment.
+ * It happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+				   struct kthread_work *work)
+{
+	lockdep_assert_held(&worker->lock);
+
+	return !list_empty(&work->node) || work->canceling;
+}
+
 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 					     struct kthread_work *work)
 {
@@ -759,7 +772,7 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	unsigned long flags;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	if (list_empty(&work->node)) {
+	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
@@ -859,7 +872,7 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 
 	spin_lock_irqsave(&worker->lock, flags);
 
-	if (list_empty(&work->node)) {
+	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
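With queuing_blocked() guarding both queue paths above, an attempt to queue a work while it is being canceled simply fails, and the caller can detect this from the return value. A minimal caller-side sketch (illustration only, not part of the patch; "worker" and "my_work" are hypothetical and assumed to be set up elsewhere with kthread_create_worker() and kthread_init_work()):

#include <linux/kthread.h>
#include <linux/printk.h>

static void example_try_queue(struct kthread_worker *worker,
			      struct kthread_work *my_work)
{
	/*
	 * kthread_queue_work() returns false both when my_work is
	 * already pending and while it is being canceled.
	 */
	if (!kthread_queue_work(worker, my_work))
		pr_debug("my_work pending or being canceled, not queued\n");
}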
@@ -919,6 +932,121 @@ void kthread_flush_work(struct kthread_work *work)
 }
 EXPORT_SYMBOL_GPL(kthread_flush_work);
 
+/*
+ * This function removes the work from the worker queue. It also makes
+ * sure that it won't get queued later via the delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work processed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *	%false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+				  unsigned long *flags)
+{
+	/* Try to cancel the timer if it exists. */
+	if (is_dwork) {
+		struct kthread_delayed_work *dwork =
+			container_of(work, struct kthread_delayed_work, work);
+		struct kthread_worker *worker = work->worker;
+
+		/*
+		 * del_timer_sync() must be called to make sure that the timer
+		 * callback is not running. The lock must be temporarily released
+		 * to avoid a deadlock with the callback. In the meantime,
+		 * any queuing is blocked by setting the canceling counter.
+		 */
+		work->canceling++;
+		spin_unlock_irqrestore(&worker->lock, *flags);
+		del_timer_sync(&dwork->timer);
+		spin_lock_irqsave(&worker->lock, *flags);
+		work->canceling--;
+	}
+
+	/*
+	 * Try to remove the work from a worker list. It might either
+	 * be from worker->work_list or from worker->delayed_work_list.
+	 */
+	if (!list_empty(&work->node)) {
+		list_del_init(&work->node);
+		return true;
+	}
+
+	return false;
+}
+
+static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+{
+	struct kthread_worker *worker = work->worker;
+	unsigned long flags;
+	int ret = false;
+
+	if (!worker)
+		goto out;
+
+	spin_lock_irqsave(&worker->lock, flags);
+	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+	WARN_ON_ONCE(work->worker != worker);
+
+	ret = __kthread_cancel_work(work, is_dwork, &flags);
+
+	if (worker->current_work != work)
+		goto out_fast;
+
+	/*
+	 * The work is in progress and we need to wait with the lock released.
+	 * In the meantime, block any queuing by setting the canceling counter.
+	 */
+	work->canceling++;
+	spin_unlock_irqrestore(&worker->lock, flags);
+	kthread_flush_work(work);
+	spin_lock_irqsave(&worker->lock, flags);
+	work->canceling--;
+
+out_fast:
+	spin_unlock_irqrestore(&worker->lock, flags);
+out:
+	return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish. This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed works. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+	return __kthread_cancel_work_sync(work, false);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
+/**
+ * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
+ *	wait for it to finish.
+ * @dwork: the kthread delayed work to cancel
+ *
+ * This is kthread_cancel_work_sync() for delayed works.
+ *
+ * Return: %true if @dwork was pending, %false otherwise.
+ */
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
+{
+	return __kthread_cancel_work_sync(&dwork->work, true);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
+
 /**
  * kthread_flush_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
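For illustration, a sketch of the intended lifecycle with a self-requeuing delayed work (not part of the patch; it assumes the kthread worker API introduced in this same series, namely kthread_create_worker(), kthread_init_delayed_work() and kthread_destroy_worker(); all poll_* and example_* names are made up):

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_worker *poll_worker;	/* hypothetical */
static struct kthread_delayed_work poll_dwork;	/* hypothetical */

static void poll_fn(struct kthread_work *work)
{
	/* ... do the periodic work ... */

	/*
	 * Re-queue ourselves. While kthread_cancel_delayed_work_sync()
	 * runs, work->canceling is non-zero and queuing_blocked() makes
	 * this queuing fail, so a self-requeue cannot defeat the cancel.
	 */
	kthread_queue_delayed_work(poll_worker, &poll_dwork, HZ);
}

static int example_start(void)
{
	poll_worker = kthread_create_worker(0, "example_poll");
	if (IS_ERR(poll_worker))
		return PTR_ERR(poll_worker);

	kthread_init_delayed_work(&poll_dwork, poll_fn);
	kthread_queue_delayed_work(poll_worker, &poll_dwork, HZ);
	return 0;
}

static void example_stop(void)
{
	/*
	 * Cancel first: per the kernel-doc above, the worker must not be
	 * destroyed before kthread_cancel_delayed_work_sync() returns.
	 */
	kthread_cancel_delayed_work_sync(&poll_dwork);
	kthread_destroy_worker(poll_worker);
}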