amdgpu_sync.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct fence            *fence;
};
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        unsigned i;

        for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
                sync->semaphores[i] = NULL;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                sync->sync_to[i] = NULL;

        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}
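
/*
 * Illustrative lifecycle sketch, not part of the original file: a sync
 * object is zero-initialized, filled with fences to wait on, flushed to
 * the hardware, and finally freed. The locals `resv`, `ring` and
 * `job_fence` are hypothetical.
 *
 *      struct amdgpu_sync sync;
 *      int r;
 *
 *      amdgpu_sync_create(&sync);
 *      r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
 *      if (!r)
 *              r = amdgpu_sync_rings(&sync, ring);
 *      amdgpu_sync_free(adev, &sync, job_fence);
 */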
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (a_fence)
                return a_fence->ring->adev == adev;

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}
static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence)
                return s_fence->owner == owner;
        if (a_fence)
                return a_fence->owner == owner;
        return false;
}
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
        if (*keep && fence_is_later(*keep, fence))
                return;

        fence_put(*keep);
        *keep = fence_get(fence);
}
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device the sync object belongs to
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Only the latest fence per fence
 * context (for foreign fences) or per ring (for fences from @adev) is
 * kept.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *f)
{
        struct amdgpu_sync_entry *e;
        struct amdgpu_fence *fence;
        struct amdgpu_fence *other;

        if (!f)
                return 0;

        if (amdgpu_sync_same_dev(adev, f) &&
            amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
                amdgpu_sync_keep_later(&sync->last_vm_update, f);

        fence = to_amdgpu_fence(f);
        if (!fence || fence->ring->adev != adev) {
                hash_for_each_possible(sync->fences, e, node, f->context) {
                        if (unlikely(e->fence->context != f->context))
                                continue;

                        amdgpu_sync_keep_later(&e->fence, f);
                        return 0;
                }

                e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
                if (!e)
                        return -ENOMEM;

                hash_add(sync->fences, &e->node, f->context);
                e->fence = fence_get(f);
                return 0;
        }

        other = sync->sync_to[fence->ring->idx];
        sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
                amdgpu_fence_later(fence, other));
        amdgpu_fence_unref(&other);

        return 0;
}
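
/*
 * Illustrative sketch, not part of the original file: because foreign
 * fences are hashed by f->context and merged with amdgpu_sync_keep_later(),
 * adding two fences from the same context keeps only the later one. The
 * fences `f1` and `f2` are hypothetical, with f2 later than f1 in the
 * same context.
 *
 *      r = amdgpu_sync_fence(adev, sync, f1);
 *      if (!r)
 *              r = amdgpu_sync_fence(adev, sync, f2);
 *      (the hash now holds a single entry referencing f2)
 */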
static void *amdgpu_sync_get_owner(struct fence *f)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence)
                return s_fence->owner;
        else if (a_fence)
                return a_fence->owner;
        return AMDGPU_FENCE_OWNER_UNDEFINED;
}
/**
 * amdgpu_sync_resv - sync to the fences in a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from the reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner of the job the fences belong to; used to skip fences we
 * don't need to wait for
 *
 * Sync to all fences in the reservation object that are relevant for @owner.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner)
{
        struct reservation_object_list *flist;
        struct fence *f;
        void *fence_owner;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f);

        flist = reservation_object_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
                if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
                        fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
                            (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
                             (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;

                        /* Ignore fences from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
                            fence_owner == owner)
                                continue;
                }

                r = amdgpu_sync_fence(adev, sync, f);
                if (r)
                        break;
        }
        return r;
}
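
/*
 * Illustrative sketch, not part of the original file: syncing a VM
 * page-table update to a buffer's reservation object. Passing
 * AMDGPU_FENCE_OWNER_VM as the owner makes the loop above skip shared
 * fences from regular command submissions on the same device; only other
 * VM updates and buffer moves are waited on. `pt_resv` is hypothetical.
 *
 *      r = amdgpu_sync_resv(adev, sync, pt_resv, AMDGPU_FENCE_OWNER_VM);
 *      if (r)
 *              return r;
 */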
/**
 * amdgpu_sync_get_fence - get the next unsignaled fence from the hash
 *
 * @sync: sync object to get the fence from
 *
 * Removes hash entries until an unsignaled fence is found and returns a
 * reference to it, or NULL once only signaled fences (or none) remain.
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {

                f = e->fence;

                hash_del(&e->node);
                kfree(e);

                if (!fence_is_signaled(f))
                        return f;

                fence_put(f);
        }
        return NULL;
}
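
/*
 * Illustrative sketch, not part of the original file: draining the hashed
 * fences one by one, as a scheduler dependency callback might. Each call
 * hands back one unsignaled fence reference that the caller must drop.
 *
 *      struct fence *dep;
 *
 *      while ((dep = amdgpu_sync_get_fence(sync))) {
 *              (wait for or otherwise consume dep)
 *              fence_put(dep);
 *      }
 */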
/**
 * amdgpu_sync_wait - wait for the fences in the sync object
 *
 * @sync: sync object to wait on
 *
 * Waits for all hashed fences. The per-ring fences are waited on as well,
 * unless semaphores are enabled and amdgpu_sync_rings() will handle them
 * on the GPU instead.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = fence_wait(e->fence, false);
                if (r)
                        return r;

                hash_del(&e->node);
                fence_put(e->fence);
                kfree(e);
        }

        if (amdgpu_enable_semaphores)
                return 0;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_fence *fence = sync->sync_to[i];
                if (!fence)
                        continue;

                r = fence_wait(&fence->base, false);
                if (r)
                        return r;
        }

        return 0;
}
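
/*
 * Illustrative sketch, not part of the original file: blocking on the CPU
 * until the sync object's fences have signaled, e.g. before touching a
 * buffer from the CPU. With semaphores enabled, the per-ring fences are
 * deliberately left for amdgpu_sync_rings() to handle on the GPU.
 *
 *      r = amdgpu_sync_wait(sync);
 *      if (r)
 *              return r;
 */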
/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
                      struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned count = 0;
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_fence *fence = sync->sync_to[i];
                struct amdgpu_semaphore *semaphore;
                struct amdgpu_ring *other = adev->rings[i];

                /* check if we really need to sync */
                if (!amdgpu_fence_need_sync(fence, ring))
                        continue;

                /* prevent GPU deadlocks */
                if (!other->ready) {
                        dev_err(adev->dev, "Syncing to a disabled ring!");
                        return -EINVAL;
                }

                if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
                    (count >= AMDGPU_NUM_SYNCS)) {
                        /* not enough room, wait manually */
                        r = fence_wait(&fence->base, false);
                        if (r)
                                return r;
                        continue;
                }

                r = amdgpu_semaphore_create(adev, &semaphore);
                if (r)
                        return r;

                sync->semaphores[count++] = semaphore;

                /* allocate enough space for sync command */
                r = amdgpu_ring_alloc(other, 16);
                if (r)
                        return r;

                /* emit the signal semaphore */
                if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
                        /* signaling wasn't successful, wait manually */
                        amdgpu_ring_undo(other);
                        r = fence_wait(&fence->base, false);
                        if (r)
                                return r;
                        continue;
                }

                /* we assume the caller has already allocated space on the
                 * waiter's ring */
                if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
                        /* waiting wasn't successful, wait manually */
                        amdgpu_ring_undo(other);
                        r = fence_wait(&fence->base, false);
                        if (r)
                                return r;
                        continue;
                }

                amdgpu_ring_commit(other);
                amdgpu_fence_note_sync(fence, ring);
        }

        return 0;
}
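
/*
 * Illustrative sketch, not part of the original file: emitting the sync as
 * semaphore signal/wait pairs (or falling back to CPU waits) right before
 * writing commands to `ring`. The caller holds the ring lock and has
 * already reserved space on `ring` for the wait commands, as noted above.
 *
 *      r = amdgpu_sync_rings(sync, ring);
 *      if (r)
 *              return r;       (caller unwinds its own ring allocation)
 */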
/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to free
 * @fence: fence the semaphore release is delayed until
 *
 * Drop all fence references held by the sync object and schedule its
 * semaphores to be freed once @fence signals.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
                      struct amdgpu_sync *sync,
                      struct fence *fence)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                fence_put(e->fence);
                kfree(e);
        }

        for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
                amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                amdgpu_fence_unref(&sync->sync_to[i]);

        fence_put(sync->last_vm_update);
}