
rcu: Apply micro-optimization and int/bool fixes to RCU's idle handling

Checking "user" before "is_idle_task()" allows better optimizations
in cases where inlining is possible.  Also, "bool" should be passed
"true" or "false" rather than "1" or "0".  This commit therefore makes
these changes, as noted in Josh's review.

Reported-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Paul E. McKenney, 13 years ago
Parent
Commit: cb349ca954
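
For reviewers skimming the diff, the reasoning behind the ordering change is that "user" is a plain function argument, so at call sites where rcu_eqs_enter_common()/rcu_eqs_exit_common() end up inlined with a constant "user", testing "!user" first lets short-circuit evaluation discard the is_idle_task(current) check (and the error path behind it) without the compiler having to reason about is_idle_task() at all. The stand-alone sketch below is only an illustration of that effect; the names in it (stub_is_idle_task, eqs_enter_common, enter_from_user, enter_from_idle) are made up for the example and are not kernel symbols.

/*
 * Minimal sketch of why testing "user" first helps once the function
 * is inlined.  All identifiers are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for is_idle_task(current); kept out of line so the call
 * looks like real work to the optimizer. */
__attribute__((noinline)) static bool stub_is_idle_task(void)
{
	return false;
}

/* Stand-in for rcu_eqs_enter_common(): "user" is a compile-time
 * constant at every call site once this is inlined. */
static inline void eqs_enter_common(bool user)
{
	/*
	 * Testing "!user" first means that when the inlined caller
	 * passes user == true, short-circuit evaluation makes the
	 * stub_is_idle_task() call unreachable, so the whole branch
	 * folds away.  With the old operand order the compiler would
	 * first have to prove stub_is_idle_task() free of side effects
	 * before it could drop the call.
	 */
	if (!user && !stub_is_idle_task())
		puts("error: not running in the idle task");
}

void enter_from_user(void)
{
	eqs_enter_common(true);		/* branch disappears entirely */
}

void enter_from_idle(void)
{
	eqs_enter_common(false);	/* idle-task test is kept */
}

int main(void)
{
	enter_from_user();
	enter_from_idle();
	return 0;
}

Compiling the sketch with something like gcc -O2 -S and comparing the assembly for enter_from_user() under the two operand orders makes the difference visible. The second half of the patch (passing "true"/"false" instead of "1"/"0") does not change generated code; it simply matches the functions' bool parameters.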
1 changed file with 8 additions and 8 deletions

kernel/rcutree.c  (+8 −8)

@@ -335,7 +335,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 				bool user)
 {
 	trace_rcu_dyntick("Start", oldval, 0);
-	if (!is_idle_task(current) && !user) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());

 		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -399,7 +399,7 @@ void rcu_idle_enter(void)
 	unsigned long flags;

 	local_irq_save(flags);
-	rcu_eqs_enter(0);
+	rcu_eqs_enter(false);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -435,7 +435,7 @@ void rcu_user_enter(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
 		rdtp->in_user = true;
-		rcu_eqs_enter(1);
+		rcu_eqs_enter(true);
 	}
 	local_irq_restore(flags);
 }
@@ -492,7 +492,7 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_eqs_enter_common(rdtp, oldval, 1);
+		rcu_eqs_enter_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }

@@ -513,7 +513,7 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (!is_idle_task(current) && !user) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());

 		trace_rcu_dyntick("Error on exit: not idle task",
@@ -560,7 +560,7 @@ void rcu_idle_exit(void)
 	unsigned long flags;

 	local_irq_save(flags);
-	rcu_eqs_exit(0);
+	rcu_eqs_exit(false);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -592,7 +592,7 @@ void rcu_user_exit(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	if (rdtp->in_user) {
 		rdtp->in_user = false;
-		rcu_eqs_exit(1);
+		rcu_eqs_exit(true);
 	}
 	local_irq_restore(flags);
 }
@@ -653,7 +653,7 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_eqs_exit_common(rdtp, oldval, 1);
+		rcu_eqs_exit_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }