/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

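/*
 * Example invocation (the parameter values here are purely illustrative,
 * not a recommendation): torture the reader-writer semaphore with four
 * writer kthreads and eight reader kthreads, shutting down after 300
 * seconds:
 *
 *	modprobe locktorture torture_type=rwsem_lock nwriters_stress=4 \
 *		nreaders_stress=8 shutdown_secs=300
 */
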
/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

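/*
 * A new lock flavor plugs in by filling out one of these vectors and
 * adding it to the torture_ops[] table in lock_torture_init().  A
 * minimal sketch for a hypothetical "foo_lock" (all foo_* names are
 * invented for illustration; they do not exist in this file):
 *
 *	static struct lock_torture_ops foo_lock_ops = {
 *		.writelock	= torture_foo_lock,
 *		.write_delay	= torture_foo_delay,
 *		.task_boost	= torture_boost_dummy,
 *		.writeunlock	= torture_foo_unlock,
 *		.name		= "foo_lock"
 *	};
 *
 * Leaving .readlock NULL marks the primitive as exclusive-only, in which
 * case no reader kthreads are created for it.
 */
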
struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

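/*
 * Note the scaling convention used by this and the other *_delay()
 * functions below: the modulus is multiplied by the number of stress
 * kthreads, so each kthread takes the long delay with probability
 * 1/(nthreads * constant) per call.  This keeps the system-wide rate of
 * long delays roughly constant regardless of how many kthreads the test
 * is running.
 */
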
static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};

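/*
 * The "lock" above acquires and releases nothing at all, so concurrent
 * writers are guaranteed to collide.  It exists to verify that this test
 * really can detect mutual-exclusion failures: a run with
 * torture_type=lock_busted should end in FAILURE, never SUCCESS.
 */
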
static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

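/*
 * Note that the read-side short delay (shortdelay_us of 10 above) is
 * longer than the write-side one (2), presumably so that read-side
 * critical sections are more likely to overlap one another and any
 * concurrently running writer.
 */
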
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

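/*
 * The locking loop below follows the standard wound/wait pattern: take
 * the three mutexes in list order, and if ww_mutex_lock() returns
 * -EDEADLK (meaning this acquire context has been "wounded" by an older
 * one), drop every lock already held, block on the contended lock with
 * ww_mutex_lock_slow(), move that lock to the head of the list, and
 * retry the remainder.  Any other error is propagated after the same
 * rollback.
 */
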
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations.  When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rtmutex_boost,
	.writeunlock = torture_rtmutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};

static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};

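/*
 * Note that percpu_init_rwsem() can fail because it allocates per-CPU
 * state, which is why this is the one lock flavor that needs an .init
 * op.  The percpu flavor trades a per-CPU (and thus very cheap)
 * read-side fast path for a correspondingly more expensive write side.
 */
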
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0;
	long min = statp[0].n_lock_acquired;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

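/*
 * A resulting stats line looks like the following (acquisition counts
 * invented purely for illustration):
 *
 *	Writes:  Total: 94503  Max/Min: 12088/11034   Fail: 0
 *
 * "???" is appended when the per-kthread acquisition counts are badly
 * imbalanced (max/2 > min), and "!!!" when any kthread recorded a
 * mutual-exclusion failure.
 */
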
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	kfree(cxt.lrsa);

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	lock_is_write_held = 0;
	cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = 0;
		cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			kfree(cxt.lwsa);
			cxt.lwsa = NULL;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}

	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
  917. module_exit(lock_torture_cleanup);