
rhashtable: Factor out bucket_tail() function

Subsequent patches will require access to the bucket tail. Access
to the tail is relatively cheap as the automatic resizing of the
table should keep the average number of entries per bucket at no
more than 0.75.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b8e1943e9f

 lib/rhashtable.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)
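
An editorial aside, not part of the patch: bucket_tail() uses the
classic pointer-to-pointer list walk, returning the address of the
final NULL link so a caller can append simply by writing through the
returned location; this also covers an empty bucket, where the head
pointer itself is returned. Below is a minimal userspace sketch of
the same pattern, with hypothetical names (struct node,
bucket_tail_plain) and the RCU annotations omitted.

#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

/* Walk the chain through a pointer-to-pointer so the returned
 * location is always valid to write through: the head pointer
 * itself for an empty bucket, otherwise the ->next field of the
 * last node.
 */
static struct node **bucket_tail_plain(struct node **bucket)
{
	struct node **pprev;

	for (pprev = bucket; *pprev != NULL; pprev = &(*pprev)->next)
		;

	return pprev;
}

int main(void)
{
	struct node b = { .key = 2, .next = NULL };
	struct node a = { .key = 1, .next = &b };
	struct node c = { .key = 3, .next = NULL };
	struct node *bucket = &a;

	*bucket_tail_plain(&bucket) = &c;	/* append at the tail */

	for (struct node *n = bucket; n != NULL; n = n->next)
		printf("%d ", n->key);		/* prints: 1 2 3 */
	printf("\n");

	return 0;
}

The kernel version in the diff below performs the same walk, with
rht_dereference_bucket() supplying the RCU dereference checks needed
because readers traverse the chain locklessly.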

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c

@@ -83,6 +83,18 @@ static u32 head_hashfn(const struct rhashtable *ht,
 	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
 }
 
+static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
+{
+	struct rhash_head __rcu **pprev;
+
+	for (pprev = &tbl->buckets[n];
+	     rht_dereference_bucket(*pprev, tbl, n);
+	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
+		;
+
+	return pprev;
+}
+
 static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 {
 	struct bucket_table *tbl;
@@ -266,7 +278,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_head __rcu **pprev;
 	unsigned int i;
 
 	ASSERT_RHT_MUTEX(ht);
@@ -286,15 +297,9 @@ int rhashtable_shrink(struct rhashtable *ht)
 	 */
 	for (i = 0; i < ntbl->size; i++) {
 		ntbl->buckets[i] = tbl->buckets[i];
+		RCU_INIT_POINTER(*bucket_tail(ntbl, i),
+				 tbl->buckets[i + ntbl->size]);
 
-		/* Link each bucket in the new table to the first bucket
-		 * in the old table that contains entries which will hash
-		 * to the new bucket.
-		 */
-		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
-		     pprev = &rht_dereference_bucket(*pprev, ntbl, i)->next)
-			;
-		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
 	}
 
 	/* Publish the new, valid hash table */
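
Also not part of the patch: the shrink hunk works because, assuming
(as in rhashtable at the time) that table sizes are powers of two and
the bucket index is the hash masked with size - 1, the entries of old
buckets i and i + new_size both hash to bucket i of the half-sized
new table; the comment removed above said exactly this. A hedged
sketch of that splice, reusing struct node and bucket_tail_plain from
the earlier example (shrink_link is an illustrative name, and RCU
publication via RCU_INIT_POINTER is again omitted):

#include <stddef.h>

static void shrink_link(struct node **old_buckets,
			struct node **new_buckets, size_t new_size)
{
	size_t i;

	for (i = 0; i < new_size; i++) {
		/* New bucket i starts with the chain from old bucket i... */
		new_buckets[i] = old_buckets[i];

		/* ...and old bucket i + new_size, whose entries map to the
		 * same index once the extra high bit is masked off, is
		 * spliced onto its tail.
		 */
		*bucket_tail_plain(&new_buckets[i]) =
			old_buckets[i + new_size];
	}
}

Factoring the tail walk out into bucket_tail() keeps this loop to two
lines and lets the subsequent patches append to a bucket without
duplicating the walk.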