@@ -489,10 +489,9 @@ exit:
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
 /**
- * rhashtable_walk_init - Initialise an iterator
+ * rhashtable_walk_enter - Initialise an iterator
  * @ht:	Table to walk over
  * @iter:	Hash table Iterator
- * @gfp:	GFP flags for allocations
  *
  * This function prepares a hash table walk.
  *
@@ -507,30 +506,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * This function may sleep so you must not call it from interrupt
  * context or with spin locks held.
  *
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
+ * You must call rhashtable_walk_exit after this function returns.
  */
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-			 gfp_t gfp)
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 {
 	iter->ht = ht;
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
 
-	iter->walker = kmalloc(sizeof(*iter->walker), gfp);
-	if (!iter->walker)
-		return -ENOMEM;
-
 	spin_lock(&ht->lock);
-	iter->walker->tbl =
+	iter->walker.tbl =
 		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
-	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
 	spin_unlock(&ht->lock);
-
-	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
 
 /**
  * rhashtable_walk_exit - Free an iterator
@@ -541,10 +532,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
 	spin_lock(&iter->ht->lock);
-	if (iter->walker->tbl)
-		list_del(&iter->walker->list);
+	if (iter->walker.tbl)
+		list_del(&iter->walker.list);
 	spin_unlock(&iter->ht->lock);
-	kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
@@ -570,12 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 	rcu_read_lock();
 
 	spin_lock(&ht->lock);
-	if (iter->walker->tbl)
-		list_del(&iter->walker->list);
+	if (iter->walker.tbl)
+		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker->tbl) {
-		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+	if (!iter->walker.tbl) {
+		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
 
@@ -597,7 +587,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
  */
 void *rhashtable_walk_next(struct rhashtable_iter *iter)
 {
-	struct bucket_table *tbl = iter->walker->tbl;
+	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhashtable *ht = iter->ht;
 	struct rhash_head *p = iter->p;
 
@@ -630,8 +620,8 @@ next:
 	/* Ensure we see any new tables. */
 	smp_rmb();
 
-	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-	if (iter->walker->tbl) {
+	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (iter->walker.tbl) {
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
@@ -651,7 +641,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 	__releases(RCU)
 {
 	struct rhashtable *ht;
-	struct bucket_table *tbl = iter->walker->tbl;
+	struct bucket_table *tbl = iter->walker.tbl;
 
 	if (!tbl)
 		goto out;
@@ -660,9 +650,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 
 	spin_lock(&ht->lock);
 	if (tbl->rehash < tbl->size)
-		list_add(&iter->walker->list, &tbl->walkers);
+		list_add(&iter->walker.list, &tbl->walkers);
 	else
-		iter->walker->tbl = NULL;
+		iter->walker.tbl = NULL;
 	spin_unlock(&ht->lock);
 
 	iter->p = NULL;
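
For reference, a minimal caller-side sketch of the walk API after this change. It is not part of the patch: my_obj, my_ht and walk_all() are made-up names, and the handling shown for -EAGAIN is only the simplest reasonable choice.

	#include <linux/rhashtable.h>

	struct my_obj {
		struct rhash_head node;	/* hypothetical object linked into the table */
		u32 key;
	};

	static void walk_all(struct rhashtable *my_ht)
	{
		struct rhashtable_iter iter;
		struct my_obj *obj;

		/* The walker is now embedded in the iterator, so there is no
		 * allocation, no GFP flag and no return value to check.
		 */
		rhashtable_walk_enter(my_ht, &iter);

		/* rhashtable_walk_start() may return -EAGAIN if a resize
		 * completed while no walk was in progress; the walk still
		 * works but may miss or repeat entries.
		 */
		rhashtable_walk_start(&iter);

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(obj)) {
				if (PTR_ERR(obj) == -EAGAIN)
					continue;	/* iterator moved to the resized table */
				break;
			}
			/* ... use obj ... */
		}

		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}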