@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked. RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections. On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section. Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
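
[ Not part of the patch itself: for reviewers, a minimal sketch of how
  the guarantees documented above are typically relied upon, following
  the pattern in Documentation/RCU/whatisRCU.txt.  The names struct foo,
  gbl_foo, foo_update(), foo_reclaim(), and foo_get_a() are illustrative
  placeholders, not identifiers from this file.

	struct foo {
		int a;
		struct rcu_head rcu;
	};

	static struct foo __rcu *gbl_foo;

	/* RCU callback: by the time this runs, every CPU has executed
	 * a full memory barrier since the end of any read-side critical
	 * section that could have observed the old element, so kfree()
	 * cannot race with a reader.
	 */
	static void foo_reclaim(struct rcu_head *head)
	{
		struct foo *fp = container_of(head, struct foo, rcu);

		kfree(fp);
	}

	/* Publish a new version and defer freeing the old one.  Assumes
	 * gbl_foo was initialized to a valid element; kmalloc() error
	 * handling is omitted for brevity.
	 */
	static void foo_update(int new_a)
	{
		struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
		struct foo *old_fp = rcu_dereference_protected(gbl_foo, 1);

		new_fp->a = new_a;
		rcu_assign_pointer(gbl_foo, new_fp);
		call_rcu(&old_fp->rcu, foo_reclaim);
	}

	/* Reader: sees either the old or the new version, never freed
	 * memory, without taking any locks.
	 */
	static int foo_get_a(void)
	{
		int retval;

		rcu_read_lock();
		retval = rcu_dereference(gbl_foo)->a;
		rcu_read_unlock();
		return retval;
	}
]
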
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
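
[ Likewise not part of the patch: a hypothetical sketch of the RCU-bh
  read side described above.  struct bh_data, gbl_bh_data, and
  bh_reader() are made-up names for illustration.

	struct bh_data {
		int val;
	};

	static struct bh_data __rcu *gbl_bh_data;

	static void bh_reader(void)
	{
		struct bh_data *p;

		rcu_read_lock_bh();	/* Also disables softirq processing. */
		p = rcu_dereference_bh(gbl_bh_data);
		if (p)
			pr_info("val=%d\n", p->val);
		rcu_read_unlock_bh();
	}

  An updater would pair this with call_rcu_bh() exactly as in the
  call_rcu() sketch above. ]
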
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 		     void (*func)(struct rcu_head *rcu));
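
[ Again illustrative only: the RCU-sched read side, where any region of
  disabled preemption serves as the critical section.  struct sd_data,
  gbl_sd_data, and sched_reader() are placeholder names.

	struct sd_data {
		int val;
	};

	static struct sd_data __rcu *gbl_sd_data;

	static void sched_reader(void)
	{
		struct sd_data *p;

		rcu_read_lock_sched();	/* Implies preempt_disable(). */
		p = rcu_dereference_sched(gbl_sd_data);
		if (p)
			pr_info("val=%d\n", p->val);
		rcu_read_unlock_sched();
	}

  The matching updater uses call_rcu_sched(), whose callback is invoked
  only after every CPU has passed through a quiescent state: a context
  switch, the idle loop, or user-mode execution. ]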