@@ -83,7 +83,7 @@ void knav_queue_notify(struct knav_queue_inst *inst)
 			continue;
 		if (WARN_ON(!qh->notifier_fn))
 			continue;
-		atomic_inc(&qh->stats.notifies);
+		this_cpu_inc(qh->stats->notifies);
 		qh->notifier_fn(qh->notifier_fn_arg);
 	}
 	rcu_read_unlock();
@@ -214,6 +214,12 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 	if (!qh)
 		return ERR_PTR(-ENOMEM);
 
+	qh->stats = alloc_percpu(struct knav_queue_stats);
+	if (!qh->stats) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	qh->flags = flags;
 	qh->inst = inst;
 	id = inst->id - inst->qmgr->start_queue;
@@ -229,13 +235,17 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 		if (range->ops && range->ops->open_queue)
 			ret = range->ops->open_queue(range, inst, flags);
 
-		if (ret) {
-			devm_kfree(inst->kdev->dev, qh);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto err;
 	}
 	list_add_tail_rcu(&qh->list, &inst->handles);
 	return qh;
+
+err:
+	if (qh->stats)
+		free_percpu(qh->stats);
+	devm_kfree(inst->kdev->dev, qh);
+	return ERR_PTR(ret);
 }
 
 static struct knav_queue *
@@ -411,6 +421,12 @@ static void knav_queue_debug_show_instance(struct seq_file *s,
 {
 	struct knav_device *kdev = inst->kdev;
 	struct knav_queue *qh;
+	int cpu = 0;
+	int pushes = 0;
+	int pops = 0;
+	int push_errors = 0;
+	int pop_errors = 0;
+	int notifies = 0;
 
 	if (!knav_queue_is_busy(inst))
 		return;
@@ -418,19 +434,22 @@ static void knav_queue_debug_show_instance(struct seq_file *s,
 	seq_printf(s, "\tqueue id %d (%s)\n",
 		   kdev->base_id + inst->id, inst->name);
 	for_each_handle_rcu(qh, inst) {
-		seq_printf(s, "\t\thandle %p: ", qh);
-		seq_printf(s, "pushes %8d, ",
-			   atomic_read(&qh->stats.pushes));
-		seq_printf(s, "pops %8d, ",
-			   atomic_read(&qh->stats.pops));
-		seq_printf(s, "count %8d, ",
-			   knav_queue_get_count(qh));
-		seq_printf(s, "notifies %8d, ",
-			   atomic_read(&qh->stats.notifies));
-		seq_printf(s, "push errors %8d, ",
-			   atomic_read(&qh->stats.push_errors));
-		seq_printf(s, "pop errors %8d\n",
-			   atomic_read(&qh->stats.pop_errors));
+		for_each_possible_cpu(cpu) {
+			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
+			pops += per_cpu_ptr(qh->stats, cpu)->pops;
+			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
+			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
+			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
+		}
+
+		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
+			   qh,
+			   pushes,
+			   pops,
+			   knav_queue_get_count(qh),
+			   notifies,
+			   push_errors,
+			   pop_errors);
 	}
 }
 
@@ -547,6 +566,7 @@ void knav_queue_close(void *qhandle)
 		if (range->ops && range->ops->close_queue)
 			range->ops->close_queue(range, inst);
 	}
+	free_percpu(qh->stats);
 	devm_kfree(inst->kdev->dev, qh);
 }
 EXPORT_SYMBOL_GPL(knav_queue_close);
@@ -620,7 +640,7 @@ int knav_queue_push(void *qhandle, dma_addr_t dma,
 	val = (u32)dma | ((size / 16) - 1);
 	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
 
-	atomic_inc(&qh->stats.pushes);
+	this_cpu_inc(qh->stats->pushes);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(knav_queue_push);
@@ -658,7 +678,7 @@ dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
 	if (size)
 		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
 
-	atomic_inc(&qh->stats.pops);
+	this_cpu_inc(qh->stats->pops);
 	return dma;
 }
 EXPORT_SYMBOL_GPL(knav_queue_pop);
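
Note: these hunks assume a companion change to the driver's private header (not part of this excerpt) in which struct knav_queue_stats drops its atomic_t members in favour of plain counters and struct knav_queue's embedded stats member becomes a per-CPU pointer. A minimal sketch of the layout implied by the accessors above, with field names taken from the hunks and everything else elided:

	/* sketch only: one plain-integer copy of the counters per CPU */
	struct knav_queue_stats {
		unsigned int pushes;
		unsigned int pops;
		unsigned int push_errors;
		unsigned int pop_errors;
		unsigned int notifies;
	};

	struct knav_queue {
		...
		/* was an embedded struct with atomic_t fields */
		struct knav_queue_stats __percpu *stats;
		...
	};

With that layout, this_cpu_inc() bumps only the current CPU's counter on the hot push/pop/notify paths, avoiding an atomic read-modify-write, while the debugfs path sums the per-CPU copies with for_each_possible_cpu()/per_cpu_ptr() only when a total is actually requested.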