
rcu: Eliminate unused expedited_normal counter

Expedited grace periods no longer fall back to normal grace periods
in response to lock contention, given that expedited grace periods
now use the rcu_node tree so as to avoid contention.  This commit
therefore removes the expedited_normal counter.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
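
The contention avoidance the message refers to is the funnel lock that
expedited grace periods walk up the rcu_node tree (exp_funnel_lock()).
The following is a rough pthreads sketch of the funnel-locking idea
only, not the kernel code; the real implementation also uses sequence
numbers so that later requests can piggyback on an expedited grace
period already in flight:

	#include <pthread.h>

	struct funnel_node {
		pthread_mutex_t lock;
		struct funnel_node *parent;	/* NULL at the root */
	};

	/*
	 * Walk leaf-to-root holding at most two locks at a time.  Each
	 * level admits one winner per subtree, so the root sees bounded
	 * contention no matter how many CPUs want a grace period.
	 */
	static void funnel_lock(struct funnel_node *node)
	{
		pthread_mutex_lock(&node->lock);
		while (node->parent) {
			pthread_mutex_lock(&node->parent->lock);
			pthread_mutex_unlock(&node->lock);
			node = node->parent;
		}
		/* Holder of the root lock starts the grace period. */
	}
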
Author: Paul E. McKenney
Commit: bb4e2c08bb

3 changed files with 2 additions and 7 deletions:
  1. Documentation/RCU/trace.txt  (+1, -4)
  2. kernel/rcu/tree.h            (+0, -1)
  3. kernel/rcu/tree_trace.c      (+1, -2)

+ 1 - 4
Documentation/RCU/trace.txt

@@ -237,7 +237,7 @@ o	"ktl" is the low-order 16 bits (in hexadecimal) of the count of
 
 The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
 
-s=21872 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
+s=21872 wd1=0 wd2=0 wd3=5 enq=0 sc=21872
 
 These fields are as follows:
 
@@ -249,9 +249,6 @@ o	"wd1", "wd2", and "wd3" are the number of times that an attempt
 	completed an expedited grace period that satisfies the attempted
 	request.  "Our work is done."
 
-o	"n" is number of times that a concurrent CPU-hotplug operation
-	forced a fallback to a normal grace period.
-
 o	"enq" is the number of quiescent states still outstanding.
 
 o	"sc" is the number of times that the attempt to start a

+ 0 - 1
kernel/rcu/tree.h

@@ -521,7 +521,6 @@ struct rcu_state {
 	struct mutex exp_mutex;			/* Serialize expedited GP. */
 	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
 	unsigned long expedited_sequence;	/* Take a ticket. */
-	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
 	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
 	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
 	int ncpus_snap;				/* # CPUs seen last time. */

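The removed field was write-dead: nothing increments it anymore, so the
only remaining traffic was the atomic_long_read() in tree_trace.c below.
For orientation, a hypothetical sketch of how a fallback counter of this
kind would be maintained (no such update site remains in the tree, which
is why the field can go):

	/*
	 * Hypothetical update site: bump the counter with the standard
	 * atomic_long_* helpers so the tracer can sample it lock-free.
	 */
	static void note_normal_fallback(struct rcu_state *rsp)
	{
		atomic_long_inc(&rsp->expedited_normal);
	}
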
+ 1 - 2
kernel/rcu/tree_trace.c

@@ -194,9 +194,8 @@ static int show_rcuexp(struct seq_file *m, void *v)
 		s2 += atomic_long_read(&rdp->exp_workdone2);
 		s3 += atomic_long_read(&rdp->exp_workdone3);
 	}
-	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu enq=%d sc=%lu\n",
 		   rsp->expedited_sequence, s0, s1, s2, s3,
-		   atomic_long_read(&rsp->expedited_normal),
 		   atomic_read(&rsp->expedited_need_qs),
 		   rsp->expedited_sequence / 2);
 	return 0;
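
For background, show_rcuexp() is a standard single-shot seq_file show
routine: the debugfs read path calls it once and it emits one formatted
line.  Note the final argument: "sc" is expedited_sequence / 2 because
the sequence counter advances by two per expedited grace period (the low
bit flags a grace period in progress).  A minimal sketch of the pattern
with hypothetical names (the single_open()/seq_printf() calls themselves
are the standard <linux/seq_file.h> API):

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *v)
	{
		/* One formatted line per read of the file. */
		seq_printf(m, "s=%lu sc=%lu\n", 21872UL, 21872UL / 2);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_fops = {
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};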