/*
 * Common functions for in-kernel torture tests.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

static char *torture_type;
static bool verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);
static int *torture_runnable;
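
/*
 * The fullstop state machine, as derived from the code below: fullstop
 * starts out as FULLSTOP_RMMOD, torture_init_begin() sets it to
 * FULLSTOP_DONTSTOP for the duration of the test, the reboot notifier
 * moves it to FULLSTOP_SHUTDOWN if the system shuts down mid-test, and
 * torture_cleanup() moves it back to FULLSTOP_RMMOD on normal module
 * removal.  All transitions are made under fullstop_mutex.
 */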

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled, otherwise does nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	unsigned long delta;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;
	unsigned long starttime;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "torture_onoff task: offlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_offline_attempts++;
			ret = cpu_down(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: offline %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: offlined %d\n",
						 torture_type, cpu);
				n_offline_successes++;
				delta = jiffies - starttime;
				sum_offline += delta;
				if (min_offline < 0) {
					min_offline = delta;
					max_offline = delta;
				}
				if (min_offline > delta)
					min_offline = delta;
				if (max_offline < delta)
					max_offline = delta;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "torture_onoff task: onlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_online_attempts++;
			ret = cpu_up(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: online %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: onlined %d\n",
						 torture_type, cpu);
				n_online_successes++;
				delta = jiffies - starttime;
				sum_online += delta;
				if (min_online < 0) {
					min_online = delta;
					max_online = delta;
				}
				if (min_online > delta)
					min_online = delta;
				if (max_online < delta)
					max_online = delta;
			}
		}
		schedule_timeout_interruptible(onoff_interval);
	}
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval)
{
	int ret = 0;

#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	if (onoff_interval <= 0)
		return 0;
	ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
	return ret;
}
EXPORT_SYMBOL_GPL(torture_onoff_init);

/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_cleanup);

/*
 * Print online/offline testing statistics.
 */
char *torture_onoff_stats(char *page)
{
#ifdef CONFIG_HOTPLUG_CPU
	page += sprintf(page,
		       "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		       n_online_successes, n_online_attempts,
		       n_offline_successes, n_offline_attempts,
		       min_online, max_online,
		       min_offline, max_offline,
		       sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
	return page;
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);
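
/*
 * Reading the line emitted above, in the order of the sprintf() arguments:
 * online successes/attempts, offline successes/attempts, then the
 * minimum,maximum online durations and minimum,maximum offline durations,
 * then the summed online and offline durations.  All durations are in
 * jiffies, hence the trailing HZ value for converting to seconds.
 */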

/*
 * Were there any failures during the online/offline operations?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
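
/*
 * Illustrative use (not part of this file): each torture kthread keeps its
 * own state, declared with DEFINE_TORTURE_RANDOM(), and samples it with a
 * modulus to make occasional random decisions, roughly 1% of the time in
 * this sketch:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *	...
 *	if (!(torture_random(&rand) % 100))
 *		schedule_timeout_interruptible(1);
 *
 * torture_onoff() above additionally shifts the value right by four bits
 * before taking its modulus, avoiding any reliance on the low-order bits.
 */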

/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};
static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	if (shuffle_idle_cpu != -1) {
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
		if (cpumask_empty(shuffle_tmp_mask)) {
			put_online_cpus();
			return;
		}
	}

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}
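
/*
 * Note on the rotation above: cpumask_next(-1, mask) returns the first CPU
 * in the mask, so after shuffle_idle_cpu runs past the last CPU and is
 * reset to -1, the following invocation starts idling CPUs again from the
 * beginning.  The net effect is a round-robin rotation of the forced-idle
 * CPU, with one interval per cycle during which no CPU is forced idle.
 */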

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and cut off its timer ticks.  This is meant
 * to test RCU's support for such tickless idle CPUs.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;
	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);

/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}
EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static int shutdown_secs;		/* desired test duration in seconds. */
static struct task_struct *shutdown_task;
static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shut down the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	jiffies_snap = jiffies;
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !torture_must_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %lu jiffies remaining\n",
				 torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = jiffies;
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */
	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	int ret = 0;

	shutdown_secs = ssecs;
	torture_shutdown_hook = cleanup;
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		ret = torture_create_kthread(torture_shutdown, NULL,
					     shutdown_task);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);

/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
void stutter_wait(const char *title)
{
	while (ACCESS_ONCE(stutter_pause_test) ||
	       (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
		if (stutter_pause_test)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		torture_shutdown_absorb(title);
	}
}
EXPORT_SYMBOL_GPL(stutter_wait);
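
/*
 * Illustrative use (client code, not part of this file): each torture
 * kthread calls stutter_wait() once per pass through its main loop so
 * that all of them pause and resume together, for example:
 *
 *	do {
 *		do_one_torture_operation();	(hypothetical client work)
 *		stutter_wait("my_torture_reader");
 *	} while (!torture_must_stop());
 */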

/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop()) {
			schedule_timeout_interruptible(stutter);
			ACCESS_ONCE(stutter_pause_test) = 1;
		}
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter);
		ACCESS_ONCE(stutter_pause_test) = 0;
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(int s)
{
	int ret;

	stutter = s;
	ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
	return ret;
}
EXPORT_SYMBOL_GPL(torture_stutter_init);

/*
 * Clean up after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 *
 * The runnable parameter points to a flag that controls whether or not
 * the test is currently runnable.  If there is no such flag, pass in NULL.
 */
void __init torture_init_begin(char *ttype, bool v, int *runnable)
{
	mutex_lock(&fullstop_mutex);
	torture_type = ttype;
	verbose = v;
	torture_runnable = runnable;
	fullstop = FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void __init torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
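
/*
 * Illustrative wiring (client code, not part of this file): a client
 * torture module brackets its own setup between torture_init_begin() and
 * torture_init_end(), starting the helper kthreads above in between, and
 * undoes it all from its module-exit path:
 *
 *	static int __init my_torture_init(void)
 *	{
 *		torture_init_begin(torture_type, verbose, &my_torture_runnable);
 *		torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
 *		torture_shuffle_init(shuffle_interval * HZ);
 *		torture_stutter_init(stutter * HZ);
 *		torture_shutdown_init(shutdown_secs, my_torture_cleanup_hook);
 *		torture_init_end();
 *		return 0;
 *	}
 *
 *	static void __exit my_torture_exit(void)
 *	{
 *		if (torture_cleanup())
 *			return;
 *		... stop the client's own kthreads ...
 *	}
 *
 * The my_torture_* names and parameters are hypothetical; real clients
 * such as rcutorture follow this pattern with their own names and module
 * parameters.
 */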

/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected; otherwise, shuts down all kthreads started by functions in
 * this file.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 */
bool torture_cleanup(void)
{
	mutex_lock(&fullstop_mutex);
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup);

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	if (verbose)
		VERBOSE_TOROUT_STRING(title);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
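
/*
 * Clients do not normally call _torture_create_kthread() directly; they
 * use the torture_create_kthread() wrapper macro from <linux/torture.h>,
 * which stringizes the thread function's name to generate the name and
 * message arguments.  A rough sketch (see the header for the authoritative
 * definition):
 *
 *	#define torture_create_kthread(n, arg, tp) \
 *		_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
 *					"Failed to create " #n, &(tp))
 */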

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);