@@ -381,6 +381,61 @@ void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
+/**
+ * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-tasks
+ * grace period has elapsed, in other words after all currently
+ * executing rcu-tasks read-side critical sections have completed.  These
+ * read-side critical sections are delimited by calls to schedule(),
+ * cond_resched_rcu_qs(), idle execution, userspace execution, calls
+ * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function
+ * preambles and profiling hooks. The synchronize_rcu_tasks() function
+ * is not (yet) intended for heavy use from multiple CPUs.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-tasks read-side critical section whose beginning
+ * preceded the call to synchronize_rcu_tasks(). In addition, each CPU
+ * having an RCU-tasks read-side critical section that extends beyond
+ * the return from synchronize_rcu_tasks() is guaranteed to have executed
+ * a full memory barrier after the beginning of synchronize_rcu_tasks()
+ * and before the beginning of that RCU-tasks read-side critical section.
+ * Note that these guarantees include CPUs that are offline, idle, or
+ * executing in user mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
+ * (but again only if the system has more than one CPU).
+ */
+void synchronize_rcu_tasks(void)
+{
+ /* Complain if the scheduler has not started. */
+	rcu_lockdep_assert(rcu_scheduler_active,
+			   "synchronize_rcu_tasks called too soon");
+
+ /* Wait for the grace period. */
+ wait_rcu_gp(call_rcu_tasks);
+}
+
+/**
+ * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
+ *
+ * Although the current implementation is guaranteed to wait, it is not
+ * obligated to, for example, if there are no pending callbacks.
+ */
+void rcu_barrier_tasks(void)
+{
+ /* There is only one callback queue, so this is easy. ;-) */
+ synchronize_rcu_tasks();
+}
+
/* See if the current task has stopped holding out, remove from list if so. */
static void check_holdout_task(struct task_struct *t)
{