rcuperf.c

/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
torture_param(bool, verbose, true, "Enable verbose debugging printk()s");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");
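
/*
 * Example usage (illustrative only; assumes the test is built as a
 * module named rcuperf.ko):
 *
 *	modprobe rcuperf perf_type=srcu nwriters=4 gp_exp=1 shutdown=1
 *
 * This would measure expedited SRCU grace periods using four writer
 * kthreads, then power the system off once all writers finish.
 */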

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;

static int rcu_perf_writer_state;
#define RTWS_INIT	0
#define RTWS_EXP_SYNC	1
#define RTWS_SYNC	2
#define RTWS_IDLE	3
#define RTWS_STOPPING	4

#define MAX_MEAS	10000
#define MIN_MEAS	100
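
/*
 * Each writer takes at least MIN_MEAS and at most MAX_MEAS grace-period
 * latency measurements before declaring itself done.
 */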

static int perf_runnable = IS_ENABLED(MODULE);
module_param(perf_runnable, int, 0444);
MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*started)(void);
	unsigned long (*completed)(void);
	unsigned long (*exp_completed)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};
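
/*
 * A flavor's ops supply: readlock()/readunlock() to bracket a read-side
 * critical section, started()/completed()/exp_completed() to sample
 * grace-period counters, and sync()/exp_sync() to wait for a normal or
 * expedited grace period.  The flavor under test is chosen by matching
 * the perf_type module parameter against ->name at init time.
 */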

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.started	= rcu_batches_started,
	.completed	= rcu_batches_completed,
	.exp_completed	= rcu_exp_batches_completed,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh perf testing.
 */

static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static struct rcu_perf_ops rcu_bh_ops = {
	.ptype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_bh_perf_read_lock,
	.readunlock	= rcu_bh_perf_read_unlock,
	.started	= rcu_batches_started_bh,
	.completed	= rcu_batches_completed_bh,
	.exp_completed	= rcu_exp_batches_completed_sched,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.name		= "rcu_bh"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.started	= NULL,
	.completed	= srcu_perf_completed,
	.exp_completed	= srcu_perf_completed,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};
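
/*
 * Note that ->started is never invoked by rcuperf, and SRCU exposes a
 * single batches-completed counter, so both ->completed and
 * ->exp_completed map to srcu_perf_completed() above.
 */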

/*
 * Definitions for sched perf testing.
 */

static int sched_perf_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_perf_read_unlock(int idx)
{
	preempt_enable();
}

static struct rcu_perf_ops sched_ops = {
	.ptype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= sched_perf_read_lock,
	.readunlock	= sched_perf_read_unlock,
	.started	= rcu_batches_started_sched,
	.completed	= rcu_batches_completed_sched,
	.exp_completed	= rcu_exp_batches_completed_sched,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.name		= "sched"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks perf testing.  RCU-tasks has no explicit
 * read-side markers, so its readlock/readunlock callbacks are no-ops.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.started	= rcu_no_completed,
	.completed	= rcu_no_completed,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

#define RCUPERF_TASKS_OPS &tasks_ops,

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

#else /* #ifdef CONFIG_TASKS_RCU */

#define RCUPERF_TASKS_OPS

static bool __maybe_unused torturing_tasks(void)
{
	return false;
}

#endif /* #else #ifdef CONFIG_TASKS_RCU */

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}
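
/*
 * Both readers and writers call rcu_perf_wait_shutdown() on every pass
 * through their loops, so once the last writer finishes, all of the
 * test's kthreads park here (each having supplied a quiescent state via
 * cond_resched_rcu_qs()) rather than continuing to load the system.
 */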

/*
 * RCU perf reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started =
				cur_ops->completed();
		}
	}
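
	/*
	 * Measurement loop: each pass times a single normal or expedited
	 * grace period and records the latency in this writer's
	 * writer_durations[] slot.
	 */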
	do {
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_exp) {
			rcu_perf_writer_state = RTWS_EXP_SYNC;
			cur_ops->exp_sync();
		} else {
			rcu_perf_writer_state = RTWS_SYNC;
			cur_ops->sync();
		}
		rcu_perf_writer_state = RTWS_IDLE;
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->completed();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	rcu_perf_writer_state = RTWS_STOPPING;
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static inline void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	if (torture_cleanup_begin())
		return;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 b_rcu_perf_writer_finished -
			 b_rcu_perf_writer_started);
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do flavor-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, return the number of CPUs.
 * If less than -1, return that many fewer than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
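
/*
 * For example, on an 8-CPU system: compute_real(4) == 4,
 * compute_real(-1) == 8, and compute_real(-3) == 6.
 */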

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
		RCUPERF_TASKS_OPS
	};

	if (!torture_init_begin(perf_type, verbose, &perf_runnable))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
			 perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_alert(" %s", perf_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
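
	/*
	 * If grace periods were force-set at boot time
	 * (rcupdate.rcu_expedited or rcupdate.rcu_normal), the other
	 * kind of grace period cannot be measured, so reject mismatched
	 * combinations up front.
	 */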
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp) {
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (rcu_gp_is_normal() && gp_exp) {
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
		firsterr = -EINVAL;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);