Browse Source

rcu: Create reasonable API for do_exit() TASKS_RCU processing

Currently, the exit-time support for TASKS_RCU is open-coded in do_exit().
This commit creates exit_tasks_rcu_start() and exit_tasks_rcu_finish()
APIs for do_exit() use.  This has the benefit of confining the use of the
tasks_rcu_exit_srcu variable to one file, allowing it to become static.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Paul E. McKenney 8 years ago
parent
commit
ccdd29ffff
4 changed files with 26 additions and 11 deletions:
  1. +4 −3   include/linux/rcupdate.h
  2. +3 −2   include/linux/sched.h
  3. +2 −5   kernel/exit.c
  4. +17 −1  kernel/rcu/update.c

include/linux/rcupdate.h (+4 −3)

@@ -162,8 +162,6 @@ static inline void rcu_init_nohz(void) { }
  * macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch_lite(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -176,12 +174,15 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
 #define rcu_note_voluntary_context_switch_lite(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)		rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**

include/linux/sched.h (+3 −2)

@@ -589,9 +589,10 @@ struct task_struct {
 
 #ifdef CONFIG_TASKS_RCU
 	unsigned long			rcu_tasks_nvcsw;
-	bool				rcu_tasks_holdout;
-	struct list_head		rcu_tasks_holdout_list;
+	u8				rcu_tasks_holdout;
+	u8				rcu_tasks_idx;
 	int				rcu_tasks_idle_cpu;
+	struct list_head		rcu_tasks_holdout_list;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 	struct sched_info		sched_info;

kernel/exit.c (+2 −5)

@@ -764,7 +764,6 @@ void __noreturn do_exit(long code)
 {
 	struct task_struct *tsk = current;
 	int group_dead;
-	TASKS_RCU(int tasks_rcu_i);
 
 	profile_task_exit(tsk);
 	kcov_task_exit(tsk);
@@ -881,9 +880,7 @@ void __noreturn do_exit(long code)
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
 
-	TASKS_RCU(preempt_disable());
-	TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
-	TASKS_RCU(preempt_enable());
+	exit_tasks_rcu_start();
 	exit_notify(tsk, group_dead);
 	proc_exit_connector(tsk);
 	mpol_put_task_policy(tsk);
@@ -918,7 +915,7 @@ void __noreturn do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
-	TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));
+	exit_tasks_rcu_finish();
 
 	do_task_dead();
 }

kernel/rcu/update.c (+17 −1)

@@ -568,7 +568,7 @@ static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 
 /* Track exiting tasks in order to allow them to be waited for. */
-DEFINE_SRCU(tasks_rcu_exit_srcu);
+DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 
 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
@@ -875,6 +875,22 @@ static void rcu_spawn_tasks_kthread(void)
 	mutex_unlock(&rcu_tasks_kthread_mutex);
 }
 
+/* Do the srcu_read_lock() for the above synchronize_srcu().  */
+void exit_tasks_rcu_start(void)
+{
+	preempt_disable();
+	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
+	preempt_enable();
+}
+
+/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
+void exit_tasks_rcu_finish(void)
+{
+	preempt_disable();
+	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
+	preempt_enable();
+}
+
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 #ifndef CONFIG_TINY_RCU