@@ -52,11 +52,13 @@
  * access to counter list:
  * - create (user context)
  *   - mlx5_fc_create() only adds to an addlist to be used by
- *     mlx5_fc_stats_query_work(). addlist is protected by a spinlock.
+ *     mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ *     that doesn't require any additional synchronization when adding a
+ *     single node.
  *   - spawn thread to do the actual destroy
  *
  * - destroy (user context)
- *   - mark a counter as deleted
+ *   - add a counter to lockless dellist
  *   - spawn thread to do the actual del
  *
  * - dump (user context)
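
The locking scheme described above leans on the kernel's lockless llist for both handoffs: user contexts push counters with llist_add(), and the single worker thread takes whole batches with llist_del_all(). A standalone sketch of that producer/consumer shape, with illustrative names (struct entry, inbox, owned are not from this patch):

    #include <linux/list.h>
    #include <linux/llist.h>

    struct entry {
            struct llist_node lnode;        /* producer -> worker handoff */
            struct list_head list;          /* worker-private sorted list */
    };

    static LLIST_HEAD(inbox);
    static LIST_HEAD(owned);

    /* Producer side: llist_add() is a lock-free cmpxchg push, so any
     * number of contexts may add single nodes concurrently with no
     * extra synchronization.
     */
    static void producer_add(struct entry *e)
    {
            llist_add(&e->lnode, &inbox);
    }

    /* Consumer side: llist_del_all() atomically detaches the whole
     * batch; afterwards the worker owns it outright and can walk it
     * without locks.
     */
    static void worker_drain(void)
    {
            struct llist_node *batch = llist_del_all(&inbox);
            struct entry *e;

            llist_for_each_entry(e, batch, lnode)
                    list_add_tail(&e->list, &owned);
    }

Note that llist_del_all() hands the batch back in reverse push order, which is why the patch re-inserts each counter at its sorted position rather than splicing the batch wholesale.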

@@ -71,36 +73,43 @@
  * elapsed, the thread will actually query the hardware.
  */

-static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
+                                                      u32 id)
 {
-        struct rb_node **new = &root->rb_node;
-        struct rb_node *parent = NULL;
-
-        while (*new) {
-                struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
-                int result = counter->id - this->id;
-
-                parent = *new;
-                if (result < 0)
-                        new = &((*new)->rb_left);
-                else
-                        new = &((*new)->rb_right);
-        }
+        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+        unsigned long next_id = (unsigned long)id + 1;
+        struct mlx5_fc *counter;
+
+        rcu_read_lock();
+        /* skip counters that are in idr, but not yet in counters list */
+        while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
+                                          &next_id)) != NULL &&
+               list_empty(&counter->list))
+                next_id++;
+        rcu_read_unlock();
+
+        return counter ? &counter->list : &fc_stats->counters;
+}

-        /* Add new node and rebalance tree. */
-        rb_link_node(&counter->node, parent, new);
-        rb_insert_color(&counter->node, root);
+static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
+                                 struct mlx5_fc *counter)
+{
+        struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
+
+        list_add_tail(&counter->list, next);
 }

-/* The function returns the last node that was queried so the caller
+/* The function returns the last counter that was queried so the caller
  * function can continue calling it till all counters are queried.
  */
-static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
                                            struct mlx5_fc *first,
                                            u32 last_id)
 {
+        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+        struct mlx5_fc *counter = NULL;
         struct mlx5_cmd_fc_bulk *b;
-        struct rb_node *node = NULL;
+        bool more = false;
         u32 afirst_id;
         int num;
         int err;
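
A note on the pair of helpers above: list_add_tail(new, next) links new immediately before next, so inserting in front of the successor found via the IDR keeps fc_stats->counters sorted by id, and falling back to &fc_stats->counters (the list head) appends at the tail when no successor exists. The same insert-before-successor idiom with a plain linear scan, as a self-contained sketch (struct item is illustrative, not from the patch):

    #include <linux/list.h>

    struct item {
            u32 id;
            struct list_head list;
    };

    /* Insert "new" in front of the first entry with a greater id,
     * keeping "head" sorted in ascending order.
     */
    static void sorted_insert(struct list_head *head, struct item *new)
    {
            struct item *cur;

            list_for_each_entry(cur, head, list)
                    if (cur->id > new->id)
                            break;

            /* If the loop ran to completion, &cur->list == head, so
             * this degenerates to an append at the tail.
             */
            list_add_tail(&new->list, &cur->list);
    }

The patch replaces the linear scan with the RCU-protected IDR walk so finding the insertion point stays cheap as the number of counters grows.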

@@ -130,14 +139,16 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
                 goto out;
         }

-        for (node = &first->node; node; node = rb_next(node)) {
-                struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
+        counter = first;
+        list_for_each_entry_from(counter, &fc_stats->counters, list) {
                 struct mlx5_fc_cache *c = &counter->cache;
                 u64 packets;
                 u64 bytes;

-                if (counter->id > last_id)
+                if (counter->id > last_id) {
+                        more = true;
                         break;
+                }

                 mlx5_cmd_fc_bulk_get(dev, b,
                                      counter->id, &packets, &bytes);
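
list_for_each_entry_from() continues from the passed-in position rather than from the list head, which is what makes the query resumable: each call picks up at the counter where the previous bulk command stopped. One subtlety explains the new `more` flag: if the walk falls off the end of the list, `counter` ends up pointing at the head sentinel and is not a valid entry, so only the early break (more = true) may hand `counter` back to the caller. A minimal illustration of the from-here semantics, reusing the illustrative struct item from the sketch above:

    /* Visits "pos" itself first, then its successors to the end of
     * the list; entries before "pos" are skipped.
     */
    static u32 sum_ids_from(struct item *pos, struct list_head *head)
    {
            u32 sum = 0;

            list_for_each_entry_from(pos, head, list)
                    sum += pos->id;

            return sum;
    }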

@@ -153,7 +164,14 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
 out:
         mlx5_cmd_fc_bulk_free(b);

-        return node;
+        return more ? counter : NULL;
+}
+
+static void mlx5_free_fc(struct mlx5_core_dev *dev,
+                         struct mlx5_fc *counter)
+{
+        mlx5_cmd_fc_free(dev, counter->id);
+        kfree(counter);
 }

 static void mlx5_fc_stats_work(struct work_struct *work)
@@ -161,52 +179,33 @@ static void mlx5_fc_stats_work(struct work_struct *work)
         struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
                                                  priv.fc_stats.work.work);
         struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+        struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
+        struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
         unsigned long now = jiffies;
-        struct mlx5_fc *counter = NULL;
-        struct mlx5_fc *last = NULL;
-        struct rb_node *node;
-        LIST_HEAD(tmplist);
-
-        spin_lock(&fc_stats->addlist_lock);

-        list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
-        if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+        if (tmplist || !list_empty(&fc_stats->counters))
                 queue_delayed_work(fc_stats->wq, &fc_stats->work,
                                    fc_stats->sampling_interval);

-        spin_unlock(&fc_stats->addlist_lock);
-
-        list_for_each_entry(counter, &tmplist, list)
-                mlx5_fc_stats_insert(&fc_stats->counters, counter);
-
-        node = rb_first(&fc_stats->counters);
-        while (node) {
-                counter = rb_entry(node, struct mlx5_fc, node);
-
-                node = rb_next(node);
-
-                if (counter->deleted) {
-                        rb_erase(&counter->node, &fc_stats->counters);
+        llist_for_each_entry(counter, tmplist, addlist)
+                mlx5_fc_stats_insert(dev, counter);

-                        mlx5_cmd_fc_free(dev, counter->id);
-
-                        kfree(counter);
-                        continue;
-                }
+        tmplist = llist_del_all(&fc_stats->dellist);
+        llist_for_each_entry_safe(counter, tmp, tmplist, dellist) {
+                list_del(&counter->list);

-                last = counter;
+                mlx5_free_fc(dev, counter);
         }

-        if (time_before(now, fc_stats->next_query) || !last)
+        if (time_before(now, fc_stats->next_query) ||
+            list_empty(&fc_stats->counters))
                 return;
+        last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

-        node = rb_first(&fc_stats->counters);
-        while (node) {
-                counter = rb_entry(node, struct mlx5_fc, node);
-
-                node = mlx5_fc_stats_query(dev, counter, last->id);
-        }
+        counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
+                                   list);
+        while (counter)
+                counter = mlx5_fc_stats_query(dev, counter, last->id);

         fc_stats->next_query = now + fc_stats->sampling_interval;
 }
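
Two details in the worker are worth flagging. First, the add batch is drained before the del batch, so a counter that was created and destroyed between two work runs is first linked into the sorted list and then cleanly unlinked in the same pass. Second, the add loop can use plain llist_for_each_entry() because its nodes outlive the walk, while the del loop needs the _safe variant because each node is freed in the body. A condensed sketch of the safe drain-and-free, reusing the illustrative struct entry from the earlier sketch:

    #include <linux/slab.h>

    static void worker_free_all(struct llist_head *inbox)
    {
            struct llist_node *batch = llist_del_all(inbox);
            struct entry *e, *tmp;

            /* The _safe variant caches the next pointer before the
             * body runs, so freeing the current node cannot derail
             * the walk.
             */
            llist_for_each_entry_safe(e, tmp, batch, lnode)
                    kfree(e);
    }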

@@ -220,24 +219,38 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
         counter = kzalloc(sizeof(*counter), GFP_KERNEL);
         if (!counter)
                 return ERR_PTR(-ENOMEM);
+        INIT_LIST_HEAD(&counter->list);

         err = mlx5_cmd_fc_alloc(dev, &counter->id);
         if (err)
                 goto err_out;

         if (aging) {
+                u32 id = counter->id;
+
                 counter->cache.lastuse = jiffies;
                 counter->aging = true;

-                spin_lock(&fc_stats->addlist_lock);
-                list_add(&counter->list, &fc_stats->addlist);
-                spin_unlock(&fc_stats->addlist_lock);
+                idr_preload(GFP_KERNEL);
+                spin_lock(&fc_stats->counters_idr_lock);
+
+                err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
+                                    GFP_NOWAIT);
+
+                spin_unlock(&fc_stats->counters_idr_lock);
+                idr_preload_end();
+                if (err)
+                        goto err_out_alloc;
+
+                llist_add(&counter->addlist, &fc_stats->addlist);

                 mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
         }

         return counter;

+err_out_alloc:
+        mlx5_cmd_fc_free(dev, counter->id);
 err_out:
         kfree(counter);
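
The create path pins the counter's firmware-assigned id as its IDR key: with the start and max arguments both set to id, idr_alloc_u32() either installs the pointer at exactly that id or fails. The idr_preload()/idr_preload_end() bracket pre-charges the allocator with GFP_KERNEL memory outside the spinlock, so the allocation inside the critical section can safely use GFP_NOWAIT. The idiom in isolation, as a hypothetical helper (install_at_id is not part of the patch):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static int install_at_id(struct idr *idr, spinlock_t *lock,
                             void *ptr, u32 id)
    {
            int err;

            idr_preload(GFP_KERNEL);        /* may sleep; outside the lock */
            spin_lock(lock);
            /* start == max == id: succeed only at this exact id */
            err = idr_alloc_u32(idr, ptr, &id, id, GFP_NOWAIT);
            spin_unlock(lock);
            idr_preload_end();

            return err;
    }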

@@ -253,13 +266,16 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
                 return;

         if (counter->aging) {
-                counter->deleted = true;
+                spin_lock(&fc_stats->counters_idr_lock);
+                WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
+                spin_unlock(&fc_stats->counters_idr_lock);
+
+                llist_add(&counter->dellist, &fc_stats->dellist);
                 mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
                 return;
         }

-        mlx5_cmd_fc_free(dev, counter->id);
-        kfree(counter);
+        mlx5_free_fc(dev, counter);
 }
 EXPORT_SYMBOL(mlx5_fc_destroy);
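
The ordering in the aging branch is deliberate: the counter is unpublished from the IDR first, so mlx5_fc_counters_lookup_next() can no longer return it by the time the worker unlinks and frees it, and only then is it handed to the worker. Annotated, using the same calls as the hunk above:

    spin_lock(&fc_stats->counters_idr_lock);
    /* 1. unpublish: idr_remove() returns the removed pointer, or NULL
     *    if the id was absent, so the WARN_ON catches a double destroy
     */
    WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
    spin_unlock(&fc_stats->counters_idr_lock);

    /* 2. hand the node to the worker, which will list_del() it from
     *    the sorted list and release the hardware counter
     */
    llist_add(&counter->dellist, &fc_stats->dellist);

    /* 3. kick the worker so the firmware counter is freed promptly */
    mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);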

@@ -267,9 +283,11 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 {
         struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

-        fc_stats->counters = RB_ROOT;
-        INIT_LIST_HEAD(&fc_stats->addlist);
-        spin_lock_init(&fc_stats->addlist_lock);
+        spin_lock_init(&fc_stats->counters_idr_lock);
+        idr_init(&fc_stats->counters_idr);
+        INIT_LIST_HEAD(&fc_stats->counters);
+        init_llist_head(&fc_stats->addlist);
+        init_llist_head(&fc_stats->dellist);

         fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
         if (!fc_stats->wq)

@@ -284,34 +302,22 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 {
         struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+        struct llist_node *tmplist;
         struct mlx5_fc *counter;
         struct mlx5_fc *tmp;
-        struct rb_node *node;

         cancel_delayed_work_sync(&dev->priv.fc_stats.work);
         destroy_workqueue(dev->priv.fc_stats.wq);
         dev->priv.fc_stats.wq = NULL;

-        list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
-                list_del(&counter->list);
-
-                mlx5_cmd_fc_free(dev, counter->id);
+        idr_destroy(&fc_stats->counters_idr);

-                kfree(counter);
-        }
+        tmplist = llist_del_all(&fc_stats->addlist);
+        llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
+                mlx5_free_fc(dev, counter);

-        node = rb_first(&fc_stats->counters);
-        while (node) {
-                counter = rb_entry(node, struct mlx5_fc, node);
-
-                node = rb_next(node);
-
-                rb_erase(&counter->node, &fc_stats->counters);
-
-                mlx5_cmd_fc_free(dev, counter->id);
-
-                kfree(counter);
-        }
+        list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
+                mlx5_free_fc(dev, counter);
 }

 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,