/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true, "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
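
/*
 * Example invocation (the parameter values below are illustrative only):
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		nwriters_stress=4 nreaders_stress=8 stat_interval=15
 */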
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};
/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
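
/*
 * Run-time context shared by all kthreads: the actual writer/reader
 * thread counts, the accumulated error count, the ops vector in use,
 * and the per-thread statistics arrays.
 */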
struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};

/*
 * Definitions for lock torture testing.
 */
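
/*
 * The "lock_busted" variant intentionally provides no mutual exclusion
 * whatsoever: it exists to verify that the torture framework actually
 * flags a broken lock rather than silently reporting success.
 */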
static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};
static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};
#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
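
/*
 * Acquire all three ww_mutexes in list order.  If ww_mutex_lock() returns
 * -EDEADLK, release every lock already held, take the contended lock with
 * ww_mutex_lock_slow(), move it to the front of the list, and retry the
 * remainder.  This exercises the wait/wound deadlock-avoidance back-off
 * path of the ww_mutex API.
 */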
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations.  When the
		 * task tries to take the rtmutex, it will account for
		 * the new priority and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then be restored to its original priority, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rtmutex_boost,
	.writeunlock = torture_rtmutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif /* CONFIG_RT_MUTEXES */
static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};
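
/*
 * Note that ops vectors leaving .readlock NULL (lock_busted, the
 * spinlocks, mutex, ww_mutex, and rtmutex) are writer-only: the
 * framework creates no reader kthreads and keeps no reader statistics
 * for them.
 */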
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	/* Flag a suspicious per-thread acquisition imbalance with "???". */
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.  As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	kfree(cxt.lrsa);

end:
	torture_cleanup_end();
}
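
/*
 * Module initialization: select the ops vector matching torture_type,
 * compute the real writer/reader thread counts, allocate the statistics
 * arrays, and spawn the writer, reader, and stats kthreads.
 */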
static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers.  We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first.  This can be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);