/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/module.h>

#define TTM_WRITE_LOCK_PENDING   (1 << 0)
#define TTM_VT_LOCK_PENDING      (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
#define TTM_VT_LOCK              (1 << 3)
#define TTM_SUSPEND_LOCK         (1 << 4)
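
/*
 * lock->rw holds the read/write state: a positive value is the number of
 * current readers, 0 means unlocked and -1 means write-locked. The flag
 * bits above mark pending exclusive requests (write, vt, suspend), which
 * make new readers back off, and the vt / suspend exclusive states
 * themselves, which keep readers out while lock->rw stays at 0. When
 * kill_takers is set, a task attempting a read or write lock is sent
 * lock->signal instead of being granted the lock. ttm_lock_init() below
 * resets all of this state.
 */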
void ttm_lock_init(struct ttm_lock *lock)
{
        spin_lock_init(&lock->lock);
        init_waitqueue_head(&lock->queue);
        lock->rw = 0;
        lock->flags = 0;
        lock->kill_takers = false;
        lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);
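
/* Drop a read lock and wake waiters once the last reader is gone. */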
void ttm_read_unlock(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        if (--lock->rw == 0)
                wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);
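
/*
 * One attempt at taking a read lock: succeeds only while the lock is not
 * write-locked and no exclusive (write/vt/suspend) request is pending.
 * With kill_takers set, the caller is sent lock->signal and the lock is
 * not taken.
 */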
static bool __ttm_read_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (unlikely(lock->kill_takers)) {
                send_sig(lock->signal, current, 0);
                spin_unlock(&lock->lock);
                return false;
        }
        if (lock->rw >= 0 && lock->flags == 0) {
                ++lock->rw;
                locked = true;
        }
        spin_unlock(&lock->lock);
        return locked;
}
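
/*
 * Take a read lock, sleeping on lock->queue until __ttm_read_lock()
 * succeeds. Returns 0, or a negative error code if an interruptible wait
 * was broken by a signal.
 */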
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
        int ret = 0;

        if (interruptible)
                ret = wait_event_interruptible(lock->queue,
                                               __ttm_read_lock(lock));
        else
                wait_event(lock->queue, __ttm_read_lock(lock));
        return ret;
}
EXPORT_SYMBOL(ttm_read_lock);

static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
        bool block = true;

        *locked = false;

        spin_lock(&lock->lock);
        if (unlikely(lock->kill_takers)) {
                send_sig(lock->signal, current, 0);
                spin_unlock(&lock->lock);
                return false;
        }
        if (lock->rw >= 0 && lock->flags == 0) {
                ++lock->rw;
                block = false;
                *locked = true;
        } else if (lock->flags == 0) {
                block = false;
        }
        spin_unlock(&lock->lock);

        return !block;
}
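
/*
 * Try to take a read lock: waits while an exclusive request is pending,
 * then either takes the read lock and returns 0, or returns -EBUSY if the
 * lock is held for writing.
 */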
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
        int ret = 0;
        bool locked;

        if (interruptible)
                ret = wait_event_interruptible
                        (lock->queue, __ttm_read_trylock(lock, &locked));
        else
                wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

        if (unlikely(ret != 0)) {
                BUG_ON(locked);
                return ret;
        }

        return (locked) ? 0 : -EBUSY;
}
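
/* Drop the write lock and wake all waiters. */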
void ttm_write_unlock(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        lock->rw = 0;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);
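
/*
 * One attempt at taking the write lock: succeeds when there are no readers
 * and no other exclusive holders. On failure, TTM_WRITE_LOCK_PENDING is
 * set so that new readers back off until the writer gets its turn.
 */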
static bool __ttm_write_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (unlikely(lock->kill_takers)) {
                send_sig(lock->signal, current, 0);
                spin_unlock(&lock->lock);
                return false;
        }
        if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
                lock->rw = -1;
                lock->flags &= ~TTM_WRITE_LOCK_PENDING;
                locked = true;
        } else {
                lock->flags |= TTM_WRITE_LOCK_PENDING;
        }
        spin_unlock(&lock->lock);
        return locked;
}
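
/*
 * Take the write lock, sleeping until __ttm_write_lock() succeeds. If an
 * interruptible wait is broken by a signal, the pending-writer flag is
 * cleared and other waiters are woken before the error is returned.
 */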
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
        int ret = 0;

        if (interruptible) {
                ret = wait_event_interruptible(lock->queue,
                                               __ttm_write_lock(lock));
                if (unlikely(ret != 0)) {
                        spin_lock(&lock->lock);
                        lock->flags &= ~TTM_WRITE_LOCK_PENDING;
                        wake_up_all(&lock->queue);
                        spin_unlock(&lock->lock);
                }
        } else
                wait_event(lock->queue, __ttm_write_lock(lock));

        return ret;
}
EXPORT_SYMBOL(ttm_write_lock);
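
/*
 * Drop the vt lock and wake waiters. Returns -EINVAL if the lock was not
 * actually in the vt-locked state.
 */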
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
        int ret = 0;

        spin_lock(&lock->lock);
        if (unlikely(!(lock->flags & TTM_VT_LOCK)))
                ret = -EINVAL;
        lock->flags &= ~TTM_VT_LOCK;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);

        return ret;
}
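
/*
 * Base-object destructor: releases the vt lock when the object registered
 * by ttm_vt_lock() goes away, e.g. because the client holding the lock
 * died.
 */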
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
        int ret;

        *p_base = NULL;
        ret = __ttm_vt_unlock(lock);
        BUG_ON(ret != 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (lock->rw == 0) {
                lock->flags &= ~TTM_VT_LOCK_PENDING;
                lock->flags |= TTM_VT_LOCK;
                locked = true;
        } else {
                lock->flags |= TTM_VT_LOCK_PENDING;
        }
        spin_unlock(&lock->lock);
        return locked;
}
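
/*
 * Take the vt lock on behalf of a user-space client (tfile). Waits for all
 * readers and writers to drain, then registers a base object whose
 * destructor releases the lock if the client dies while holding it.
 */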
int ttm_vt_lock(struct ttm_lock *lock,
                bool interruptible,
                struct ttm_object_file *tfile)
{
        int ret = 0;

        if (interruptible) {
                ret = wait_event_interruptible(lock->queue,
                                               __ttm_vt_lock(lock));
                if (unlikely(ret != 0)) {
                        spin_lock(&lock->lock);
                        lock->flags &= ~TTM_VT_LOCK_PENDING;
                        wake_up_all(&lock->queue);
                        spin_unlock(&lock->lock);
                        return ret;
                }
        } else
                wait_event(lock->queue, __ttm_vt_lock(lock));

        /*
         * Add a base-object, the destructor of which will
         * make sure the lock is released if the client dies
         * while holding it.
         */
        ret = ttm_base_object_init(tfile, &lock->base, false,
                                   ttm_lock_type, &ttm_vt_lock_remove, NULL);
        if (ret)
                (void)__ttm_vt_unlock(lock);
        else
                lock->vt_holder = tfile;

        return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);
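
/*
 * Release the vt lock by dropping the holder's reference on the base
 * object; the actual unlock happens in ttm_vt_lock_remove().
 */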
int ttm_vt_unlock(struct ttm_lock *lock)
{
        return ttm_ref_object_base_unref(lock->vt_holder,
                                         lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);
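
/* Drop the suspend lock and wake waiters. */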
void ttm_suspend_unlock(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        lock->flags &= ~TTM_SUSPEND_LOCK;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);
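
/*
 * One attempt at taking the suspend lock: succeeds once all readers and
 * writers are gone, otherwise marks the request pending so new readers
 * back off. ttm_suspend_lock() below waits uninterruptibly for this to
 * succeed.
 */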
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (lock->rw == 0) {
                lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
                lock->flags |= TTM_SUSPEND_LOCK;
                locked = true;
        } else {
                lock->flags |= TTM_SUSPEND_LOCK_PENDING;
        }
        spin_unlock(&lock->lock);
        return locked;
}

void ttm_suspend_lock(struct ttm_lock *lock)
{
        wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);