@@ -559,6 +559,21 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
 	return ret;
 }
 
+static void threads_cleanup(struct perf_ctx *perf)
+{
+	struct pthr_ctx *pctx;
+	int i;
+
+	perf->run = false;
+	for (i = 0; i < MAX_THREADS; i++) {
+		pctx = &perf->pthr_ctx[i];
+		if (pctx->thread) {
+			kthread_stop(pctx->thread);
+			pctx->thread = NULL;
+		}
+	}
+}
+
 static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
 				 size_t count, loff_t *offp)
 {
@@ -574,17 +589,9 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
 	if (atomic_read(&perf->tsync) == 0)
 		perf->run = false;
 
-	if (perf->run) {
-		/* lets stop the threads */
-		perf->run = false;
-		for (i = 0; i < MAX_THREADS; i++) {
-			if (perf->pthr_ctx[i].thread) {
-				kthread_stop(perf->pthr_ctx[i].thread);
-				perf->pthr_ctx[i].thread = NULL;
-			} else
-				break;
-		}
-	} else {
+	if (perf->run)
+		threads_cleanup(perf);
+	else {
 		perf->run = true;
 
 		if (perf->perf_threads > MAX_THREADS) {
@@ -616,13 +623,8 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
 					  (void *)pctx,
 					  node, "ntb_perf %d", i);
 			if (IS_ERR(pctx->thread)) {
-				perf->run = false;
-				for (i = 0; i < MAX_THREADS; i++) {
-					if (pctx->thread) {
-						kthread_stop(pctx->thread);
-						pctx->thread = NULL;
-					}
-				}
+				pctx->thread = NULL;
+				goto err;
 			} else
 				wake_up_process(pctx->thread);
 
@@ -633,6 +635,10 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
 	}
 
 	return count;
+
+err:
+	threads_cleanup(perf);
+	return -ENXIO;
 }
 
 static const struct file_operations ntb_perf_debugfs_run = {
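
Note (not part of the patch): threads_cleanup() relies on the standard kthread start/stop contract, in which kthread_stop() blocks until the worker notices kthread_should_stop() and returns, so both the normal stop path and the error path can funnel into the same helper. A minimal, self-contained sketch of that pattern follows; the demo_* names are hypothetical and only illustrate the contract.

/* Sketch of the kthread lifecycle assumed by the cleanup helper above. */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;	/* hypothetical thread handle */

static int demo_thread(void *data)
{
	/* Worker must poll kthread_should_stop() so kthread_stop() can return. */
	while (!kthread_should_stop())
		msleep_interruptible(100);	/* stand-in for one unit of work */
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_create(demo_thread, NULL, "demo_worker");
	if (IS_ERR(demo_task)) {
		int err = PTR_ERR(demo_task);

		demo_task = NULL;	/* clear before bailing, as the patch does with pctx->thread */
		return err;
	}
	wake_up_process(demo_task);	/* thread is created stopped; kick it off */
	return 0;
}

static void demo_stop(void)
{
	if (demo_task) {
		kthread_stop(demo_task);	/* blocks until demo_thread() returns */
		demo_task = NULL;
	}
}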