/* test-ww_mutex.c */

/*
 * Module-based API test facility for ww_mutexes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
        struct work_struct work;
        struct ww_mutex mutex;
        struct completion ready, go, done;
        unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)
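
/*
 * Worker for the basic mutual exclusion test: announce that we are
 * running, wait for the parent's go-ahead, then take the mutex either
 * by spinning on trylock (TEST_MTX_TRY) or by a blocking lock, signal
 * completion and drop the lock again.
 */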
static void test_mutex_work(struct work_struct *work)
{
        struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

        complete(&mtx->ready);
        wait_for_completion(&mtx->go);

        if (mtx->flags & TEST_MTX_TRY) {
                while (!ww_mutex_trylock(&mtx->mutex))
                        cond_resched();
        } else {
                ww_mutex_lock(&mtx->mutex, NULL);
        }
        complete(&mtx->done);
        ww_mutex_unlock(&mtx->mutex);
}
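
/*
 * Basic mutual exclusion: hold the mutex while the worker tries to take
 * it, and check that the worker cannot signal completion until we have
 * released the lock. The flags select trylock vs. blocking lock in the
 * worker, whether we lock with an acquire context, and whether we poll
 * or sleep while waiting out the timeout.
 */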
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
        struct test_mutex mtx;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mtx.mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
        init_completion(&mtx.ready);
        init_completion(&mtx.go);
        init_completion(&mtx.done);
        mtx.flags = flags;

        schedule_work(&mtx.work);

        wait_for_completion(&mtx.ready);
        ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
        complete(&mtx.go);
        if (flags & TEST_MTX_SPIN) {
                unsigned long timeout = jiffies + TIMEOUT;

                ret = 0;
                do {
                        if (completion_done(&mtx.done)) {
                                ret = -EINVAL;
                                break;
                        }
                        cond_resched();
                } while (time_before(jiffies, timeout));
        } else {
                ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
        }
        ww_mutex_unlock(&mtx.mutex);
        ww_acquire_fini(&ctx);

        if (ret) {
                pr_err("%s(flags=%x): mutual exclusion failure\n",
                       __func__, flags);
                ret = -EINVAL;
        }

        flush_work(&mtx.work);
        destroy_work_on_stack(&mtx.work);
        return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
        int ret;
        int i;

        for (i = 0; i < __TEST_MTX_LAST; i++) {
                ret = __test_mutex(i);
                if (ret)
                        return ret;
        }

        return 0;
}
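
/*
 * AA (self) deadlock: with the mutex already held in this acquire
 * context, a trylock must fail and a second ww_mutex_lock() must return
 * -EALREADY rather than deadlocking.
 */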
static int test_aa(void)
{
        struct ww_mutex mutex;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        ww_mutex_lock(&mutex, &ctx);

        if (ww_mutex_trylock(&mutex)) {
                pr_err("%s: trylocked itself!\n", __func__);
                ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = ww_mutex_lock(&mutex, &ctx);
        if (ret != -EALREADY) {
                pr_err("%s: missed deadlock for recursing, ret=%d\n",
                       __func__, ret);
                if (!ret)
                        ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = 0;
out:
        ww_mutex_unlock(&mutex);
        ww_acquire_fini(&ctx);
        return ret;
}
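
/*
 * ABBA deadlock: the worker takes b_mutex then a_mutex while the parent
 * takes a_mutex then b_mutex. With ->resolve set, whichever side gets
 * -EDEADLK backs off, reacquires the contended lock with
 * ww_mutex_lock_slow() and retries; without it the deadlock is simply
 * reported back through ->result.
 */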
struct test_abba {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex b_mutex;
        struct completion a_ready;
        struct completion b_ready;
        bool resolve;
        int result;
};

static void test_abba_work(struct work_struct *work)
{
        struct test_abba *abba = container_of(work, typeof(*abba), work);
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&abba->b_mutex, &ctx);

        complete(&abba->b_ready);
        wait_for_completion(&abba->a_ready);

        err = ww_mutex_lock(&abba->a_mutex, &ctx);
        if (abba->resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba->b_mutex);
                ww_mutex_lock_slow(&abba->a_mutex, &ctx);
                err = ww_mutex_lock(&abba->b_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba->a_mutex);
        ww_mutex_unlock(&abba->b_mutex);
        ww_acquire_fini(&ctx);

        abba->result = err;
}
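
/*
 * Drive the ABBA scenario from the parent side. When resolving, both
 * threads are expected to finish without error; when not, at least one
 * of them must have seen -EDEADLK.
 */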
static int test_abba(bool resolve)
{
        struct test_abba abba;
        struct ww_acquire_ctx ctx;
        int err, ret;

        ww_mutex_init(&abba.a_mutex, &ww_class);
        ww_mutex_init(&abba.b_mutex, &ww_class);
        INIT_WORK_ONSTACK(&abba.work, test_abba_work);
        init_completion(&abba.a_ready);
        init_completion(&abba.b_ready);
        abba.resolve = resolve;

        schedule_work(&abba.work);

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&abba.a_mutex, &ctx);

        complete(&abba.a_ready);
        wait_for_completion(&abba.b_ready);

        err = ww_mutex_lock(&abba.b_mutex, &ctx);
        if (resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba.a_mutex);
                ww_mutex_lock_slow(&abba.b_mutex, &ctx);
                err = ww_mutex_lock(&abba.a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba.b_mutex);
        ww_mutex_unlock(&abba.a_mutex);
        ww_acquire_fini(&ctx);

        flush_work(&abba.work);
        destroy_work_on_stack(&abba.work);

        ret = 0;
        if (resolve) {
                if (err || abba.result) {
                        pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        } else {
                if (err != -EDEADLK && abba.result != -EDEADLK) {
                        pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        }
        return ret;
}
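
/*
 * Cyclic deadlock: each worker holds its own a_mutex and then tries to
 * take the next worker's mutex, closing the chain into a ring; the
 * completions ensure every lock is held before anyone reaches for the
 * next one. A worker that hits -EDEADLK drops its own lock, waits for
 * the contended one with ww_mutex_lock_slow() and retries, so the cycle
 * must always resolve.
 */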
struct test_cycle {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex *b_mutex;
        struct completion *a_signal;
        struct completion b_signal;
        int result;
};

static void test_cycle_work(struct work_struct *work)
{
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);

        complete(cycle->a_signal);
        wait_for_completion(&cycle->b_signal);

        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
                err = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
        ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);

        cycle->result = err;
}
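
/*
 * Build a ring of nthreads workers on the test workqueue and check that
 * every one of them eventually acquired both locks (result == 0).
 */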
static int __test_cycle(unsigned int nthreads)
{
        struct test_cycle *cycles;
        unsigned int n, last = nthreads - 1;
        int ret;

        cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
        if (!cycles)
                return -ENOMEM;

        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                ww_mutex_init(&cycle->a_mutex, &ww_class);
                if (n == last)
                        cycle->b_mutex = &cycles[0].a_mutex;
                else
                        cycle->b_mutex = &cycles[n + 1].a_mutex;

                if (n == 0)
                        cycle->a_signal = &cycles[last].b_signal;
                else
                        cycle->a_signal = &cycles[n - 1].b_signal;
                init_completion(&cycle->b_signal);

                INIT_WORK(&cycle->work, test_cycle_work);
                cycle->result = 0;
        }

        for (n = 0; n < nthreads; n++)
                queue_work(wq, &cycles[n].work);

        flush_workqueue(wq);

        ret = 0;
        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                if (!cycle->result)
                        continue;

                pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
        }

        for (n = 0; n < nthreads; n++)
                ww_mutex_destroy(&cycles[n].a_mutex);
        kfree(cycles);
        return ret;
}

static int test_cycle(unsigned int ncpus)
{
        unsigned int n;
        int ret;

        for (n = 2; n <= ncpus + 1; n++) {
                ret = __test_cycle(n);
                if (ret)
                        return ret;
        }

        return 0;
}

struct stress {
        struct work_struct work;
        struct ww_mutex *locks;
        unsigned long timeout;
        int nlocks;
};
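
/*
 * Return a randomly shuffled array of the indices 0..count-1, used by
 * the stress tests to pick an arbitrary locking order.
 */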
static int *get_random_order(int count)
{
        int *order;
        int n, r, tmp;

        order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
        if (!order)
                return order;

        for (n = 0; n < count; n++)
                order[n] = n;

        for (n = count - 1; n > 1; n--) {
                r = get_random_int() % (n + 1);
                if (r != n) {
                        tmp = order[n];
                        order[n] = order[r];
                        order[r] = tmp;
                }
        }

        return order;
}

static void dummy_load(struct stress *stress)
{
        usleep_range(1000, 2000);
}
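
/*
 * Stress: take all locks in one fixed (random) order under a single
 * acquire context. On -EDEADLK, release everything already held, take
 * the contended lock with ww_mutex_lock_slow() and retry the pass.
 */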
static void stress_inorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *locks = stress->locks;
        struct ww_acquire_ctx ctx;
        int *order;

        order = get_random_order(nlocks);
        if (!order)
                return;

        do {
                int contended = -1;
                int n, err;

                ww_acquire_init(&ctx, &ww_class);
retry:
                err = 0;
                for (n = 0; n < nlocks; n++) {
                        if (n == contended)
                                continue;

                        err = ww_mutex_lock(&locks[order[n]], &ctx);
                        if (err < 0)
                                break;
                }
                if (!err)
                        dummy_load(stress);

                if (contended > n)
                        ww_mutex_unlock(&locks[order[contended]]);
                contended = n;
                while (n--)
                        ww_mutex_unlock(&locks[order[n]]);

                if (err == -EDEADLK) {
                        ww_mutex_lock_slow(&locks[order[contended]], &ctx);
                        goto retry;
                }

                if (err) {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

        kfree(order);
        kfree(stress);
}

struct reorder_lock {
        struct list_head link;
        struct ww_mutex *lock;
};
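
/*
 * Stress: keep the locks on a list and take them in list order. On
 * -EDEADLK, drop the locks acquired so far in this pass, take the
 * contended one with ww_mutex_lock_slow() and move it to the head of
 * the list, restarting the iteration with that lock already owned.
 */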
static void stress_reorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        LIST_HEAD(locks);
        struct ww_acquire_ctx ctx;
        struct reorder_lock *ll, *ln;
        int *order;
        int n, err;

        order = get_random_order(stress->nlocks);
        if (!order)
                return;

        for (n = 0; n < stress->nlocks; n++) {
                ll = kmalloc(sizeof(*ll), GFP_KERNEL);
                if (!ll)
                        goto out;

                ll->lock = &stress->locks[order[n]];
                list_add(&ll->link, &locks);
        }
        kfree(order);
        order = NULL;

        do {
                ww_acquire_init(&ctx, &ww_class);

                list_for_each_entry(ll, &locks, link) {
                        err = ww_mutex_lock(ll->lock, &ctx);
                        if (!err)
                                continue;

                        ln = ll;
                        list_for_each_entry_continue_reverse(ln, &locks, link)
                                ww_mutex_unlock(ln->lock);

                        if (err != -EDEADLK) {
                                pr_err_once("stress (%s) failed with %d\n",
                                            __func__, err);
                                break;
                        }

                        ww_mutex_lock_slow(ll->lock, &ctx);
                        list_move(&ll->link, &locks); /* restarts iteration */
                }

                dummy_load(stress);
                list_for_each_entry(ll, &locks, link)
                        ww_mutex_unlock(ll->lock);

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

out:
        list_for_each_entry_safe(ll, ln, &locks, link)
                kfree(ll);
        kfree(order);
        kfree(stress);
}
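
/*
 * Stress: hammer a single randomly chosen lock with plain lock/unlock
 * cycles and no acquire context.
 */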
static void stress_one_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
        int err;

        do {
                err = ww_mutex_lock(lock, NULL);
                if (!err) {
                        dummy_load(stress);
                        ww_mutex_unlock(lock);
                } else {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }
        } while (!time_after(jiffies, stress->timeout));

        kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
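
/*
 * Spawn nthreads stress workers against a shared array of nlocks
 * mutexes, rotating through the patterns selected in flags; each worker
 * runs for roughly two seconds before the locks are torn down.
 */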
static int stress(int nlocks, int nthreads, unsigned int flags)
{
        struct ww_mutex *locks;
        int n;

        locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
        if (!locks)
                return -ENOMEM;

        for (n = 0; n < nlocks; n++)
                ww_mutex_init(&locks[n], &ww_class);

        for (n = 0; nthreads; n++) {
                struct stress *stress;
                void (*fn)(struct work_struct *work);

                fn = NULL;
                switch (n & 3) {
                case 0:
                        if (flags & STRESS_INORDER)
                                fn = stress_inorder_work;
                        break;
                case 1:
                        if (flags & STRESS_REORDER)
                                fn = stress_reorder_work;
                        break;
                case 2:
                        if (flags & STRESS_ONE)
                                fn = stress_one_work;
                        break;
                }

                if (!fn)
                        continue;

                stress = kmalloc(sizeof(*stress), GFP_KERNEL);
                if (!stress)
                        break;

                INIT_WORK(&stress->work, fn);
                stress->locks = locks;
                stress->nlocks = nlocks;
                stress->timeout = jiffies + 2*HZ;

                queue_work(wq, &stress->work);
                nthreads--;
        }

        flush_workqueue(wq);

        for (n = 0; n < nlocks; n++)
                ww_mutex_destroy(&locks[n]);
        kfree(locks);

        return 0;
}
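
/*
 * Run the whole suite at module load time on a dedicated unbound
 * workqueue: the basic API checks first, then the deadlock-resolution
 * tests, then the stress tests.
 */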
static int __init test_ww_mutex_init(void)
{
        int ncpus = num_online_cpus();
        int ret;

        wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
        if (!wq)
                return -ENOMEM;

        ret = test_mutex();
        if (ret)
                return ret;

        ret = test_aa();
        if (ret)
                return ret;

        ret = test_abba(false);
        if (ret)
                return ret;

        ret = test_abba(true);
        if (ret)
                return ret;

        ret = test_cycle(ncpus);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_INORDER);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_REORDER);
        if (ret)
                return ret;

        ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
        if (ret)
                return ret;

        return 0;
}

static void __exit test_ww_mutex_exit(void)
{
        destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");