@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
-			      gfp_t gfp)
-{
-	unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
-	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
-	/* Never allocate more than 0.5 locks per bucket */
-	size = min_t(unsigned int, size, tbl->size >> 1);
-
-	if (tbl->nest)
-		size = min(size, 1U << tbl->nest);
-
-	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
-		if (!tbl->locks)
-			return -ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&tbl->locks[i]);
-	}
-	tbl->locks_mask = size - 1;
-
-	return 0;
-}
-
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
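The sizing policy that alloc_bucket_locks() implemented above now lives in the
shared helper in lib/bucket_locks.c. A worked sketch of that arithmetic under
hypothetical parameters (8 possible CPUs, locks_mul = 32; locks_mul and
nbuckets stand in for ht->p.locks_mul and tbl->size):

	/* roundup_pow_of_two(8 * 32) = 256 candidate locks, but a table of
	 * 128 buckets caps the array at 128 >> 1 = 64 spinlocks, per the
	 * "never more than 0.5 locks per bucket" rule removed above.
	 * nr_pcpus itself is clamped to 64 to bound the allocation.
	 */
	unsigned int nr_pcpus = min_t(unsigned int, num_possible_cpus(), 64UL);
	unsigned int nlocks = roundup_pow_of_two(nr_pcpus * locks_mul);

	nlocks = min_t(unsigned int, nlocks, nbuckets >> 1);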
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
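alloc_bucket_spinlocks() fills in both the lock array and locks_mask (the
power-of-two array size minus one), so taking a bucket's lock stays a single
mask-and-index. A minimal sketch of how the pair is consumed, mirroring
rhashtable's existing rht_bucket_lock() helper (bucket_lock is a hypothetical
name):

	/* Map a bucket hash onto one of the allocated spinlocks. Since the
	 * array size is a power of two, the AND is a cheap modulo.
	 */
	static inline spinlock_t *bucket_lock(const struct bucket_table *tbl,
					      unsigned int hash)
	{
		return &tbl->locks[hash & tbl->locks_mask];
	}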
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
+	iter->end_of_table = 0;
 
 	spin_lock(&ht->lock);
 	iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
 /**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
  * @iter: Hash table iterator
  *
  * Start a hash table walk at the current iterator position. Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * Returns -EAGAIN if resize event occured. Note that the iterator
  * will rewind back to the beginning and you may use it immediately
  * by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
  */
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
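A minimal sketch of the two (mutually exclusive) caller patterns this split
serves; iter is assumed to have been set up with rhashtable_walk_enter(), and
restart_accounting() is a hypothetical per-walk reset:

	/* Pattern 1: the caller cares about resize events. On -EAGAIN the
	 * iterator has been rewound to the start of the table.
	 */
	if (rhashtable_walk_start_check(&iter) == -EAGAIN)
		restart_accounting();

	/* Pattern 2: the caller would continue either way, so it uses the
	 * void inline variant and skips the check entirely.
	 */
	rhashtable_walk_start(&iter);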
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 	list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl) {
+	if (!iter->walker.tbl && !iter->end_of_table) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
 
 /**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter: Hash table iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
  *
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * @iter: Hash table iterator
  *
- * Returns the next object or NULL when the end of the table is reached.
+ * Returns the found object or NULL when the end of the table is reached.
  *
- * Returns -EAGAIN if resize event occured. Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
  */
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
 {
 	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 	struct rhash_head *p = iter->p;
 	bool rhlist = ht->rhlist;
 
-	if (p) {
-		if (!rhlist || !(list = rcu_dereference(list->next))) {
-			p = rcu_dereference(p->next);
-			list = container_of(p, struct rhlist_head, rhead);
-		}
-		goto next;
-	}
+	if (!tbl)
+		return NULL;
 
 	for (; iter->slot < tbl->size; iter->slot++) {
 		int skip = iter->skip;
@@ -836,12 +803,89 @@ next:
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
+	} else {
+		iter->end_of_table = true;
 	}
 
 	return NULL;
 }
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	bool rhlist = ht->rhlist;
+
+	if (p) {
+		if (!rhlist || !(list = rcu_dereference(list->next))) {
+			p = rcu_dereference(p->next);
+			list = container_of(p, struct rhlist_head, rhead);
+		}
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			iter->list = list;
+			return rht_obj(ht, rhlist ? &list->rhead : p);
+		}
+
+		/* At the end of this slot, switch to next one and then find
+		 * next entry from that point.
+		 */
+		iter->skip = 0;
+		iter->slot++;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
+/**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+
+	if (p)
+		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+	/* No object found in current iter, find next one in the table. */
+
+	if (iter->skip) {
+		/* A nonzero skip value points to the next entry in the table
+		 * beyond that last one that was found. Decrement skip so
+		 * we find the current value. __rhashtable_walk_find_next
+		 * will restore the original value of skip assuming that
+		 * the table hasn't changed.
+		 */
+		iter->skip--;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
 /**
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter: Hash table iterator
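Taken together, a hedged end-to-end sketch of a walk using the reworked API,
including the new peek; the element type, table name, and dump_table() are
hypothetical:

	/* Hypothetical element type; 'node' is the rhash_head linkage. */
	struct my_obj {
		u32 key;
		struct rhash_head node;
	};

	static void dump_table(struct rhashtable *my_ht)
	{
		struct rhashtable_iter iter;
		struct my_obj *obj;

		rhashtable_walk_enter(my_ht, &iter);
		rhashtable_walk_start(&iter);	/* void variant; resizes ignored */

		/* Peek does not advance: the next rhashtable_walk_next()
		 * returns this same entry again.
		 */
		obj = rhashtable_walk_peek(&iter);
		if (!IS_ERR_OR_NULL(obj))
			pr_info("first key=%u\n", obj->key);

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(obj))	/* -EAGAIN: resized, rewound */
				continue;
			pr_info("key=%u\n", obj->key);
		}

		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}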