amdgpu_ctx.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
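
/**
 * amdgpu_ctx_init - initialize a pre-allocated command submission context
 * @adev:   amdgpu device the context belongs to
 * @kernel: true to attach the scheduler entities to the kernel run queue,
 *          false for the normal (userspace) run queue
 * @ctx:    context to initialize, allocated by the caller
 *
 * Resets the per-ring sequence numbers and, when the GPU scheduler is
 * enabled, creates one scheduler entity per ring. Returns 0 on success or
 * a negative error code if an entity could not be initialized.
 */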
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
		    struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ctx->rings[i].sequence = 1;

	if (amdgpu_enable_scheduler) {
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
			if (kernel)
				rq = &adev->rings[i]->scheduler->kernel_rq;
			else
				rq = &adev->rings[i]->scheduler->sched_rq;
			r = amd_sched_entity_init(adev->rings[i]->scheduler,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			/* unwind the entities created so far; @ctx is owned
			 * by the caller (and may not even be heap allocated),
			 * so do not free it here */
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(adev->rings[j]->scheduler,
						      &ctx->rings[j].entity);
			return r;
		}
	}
	return 0;
}
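
/**
 * amdgpu_ctx_fini - tear down a context initialized with amdgpu_ctx_init()
 * @ctx: context to tear down
 *
 * Drops the references held on any still-remembered fences and, when the
 * scheduler is enabled, destroys the per-ring scheduler entities. Does not
 * free @ctx itself.
 */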
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_sched_entity_fini(adev->rings[i]->scheduler,
					      &ctx->rings[i].entity);
	}
}
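
/**
 * amdgpu_ctx_alloc - allocate a new userspace context
 * @adev:  amdgpu device
 * @fpriv: file private the context will belong to
 * @id:    filled with the new context handle on success
 *
 * Allocates a context, registers it in the per-file IDR and initializes
 * it. The IDR entry is removed again if initialization fails, so a handle
 * is only ever handed out for a fully initialized context.
 */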
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, false, ctx);
	if (r) {
		/* don't leave a handle in the IDR pointing at freed memory */
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
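
/**
 * amdgpu_ctx_do_release - kref release callback for a context
 * @ref: embedded refcount that just dropped to zero
 *
 * Called via kref_put() once the last reference is gone; tears the
 * context down and frees it.
 */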
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}
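
/**
 * amdgpu_ctx_free - destroy a context handle
 * @fpriv: file private owning the handle
 * @id:    handle to destroy
 *
 * Removes the handle from the IDR and drops the reference it held; the
 * context itself stays alive until amdgpu_ctx_put() drops the last
 * outstanding reference. Returns -EINVAL for an unknown handle.
 */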
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}
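
/**
 * amdgpu_ctx_query - report context state to userspace
 * @adev:  amdgpu device
 * @fpriv: file private owning the handle
 * @id:    context handle to query
 * @out:   ioctl output union filled with the context state
 *
 * Compares the context's saved reset counter against the device's global
 * one to tell userspace whether a GPU reset has happened since the last
 * query.
 */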
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
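
/**
 * amdgpu_ctx_ioctl - DRM_AMDGPU_CTX ioctl entry point
 * @dev:  DRM device
 * @data: union drm_amdgpu_ctx passed in from userspace
 * @filp: DRM file the request came in on
 *
 * Dispatches the ALLOC_CTX, FREE_CTX and QUERY_STATE operations to the
 * helpers above; any other opcode is rejected with -EINVAL.
 */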
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
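
/**
 * amdgpu_ctx_get - look up a context and take a reference on it
 * @fpriv: file private owning the handle
 * @id:    context handle
 *
 * Returns the context with an elevated refcount, or NULL if the handle is
 * unknown. The caller must balance this with amdgpu_ctx_put().
 */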
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
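
/**
 * amdgpu_ctx_put - drop a reference taken with amdgpu_ctx_get()
 * @ctx: context to release
 *
 * Frees the context once the last reference is dropped. Returns -EINVAL
 * when called with NULL, 0 otherwise.
 */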
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
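
/**
 * amdgpu_ctx_add_fence - remember a submission's fence in the ring window
 * @ctx:   context the submission belongs to
 * @ring:  hardware ring the job was submitted to
 * @fence: fence of the submitted job
 *
 * Stores @fence in the fixed window of the last AMDGPU_CTX_MAX_CS_PENDING
 * fences for this ring. If the slot it would overwrite still holds a
 * fence, that fence is waited on first, which bounds how many submissions
 * a context can have in flight per ring. Returns the sequence number
 * assigned to @fence.
 */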
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}
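
/**
 * amdgpu_ctx_get_fence - look up a fence by sequence number
 * @ctx:  context the submission belongs to
 * @ring: hardware ring the job was submitted to
 * @seq:  sequence number returned by amdgpu_ctx_add_fence()
 *
 * Returns a new reference to the fence, NULL if the fence has already
 * aged out of the pending window, or ERR_PTR(-EINVAL) for a sequence
 * number that was never handed out.
 */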
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
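
/**
 * amdgpu_ctx_mgr_init - initialize a per-file context manager
 * @mgr: manager embedded in the amdgpu file private
 */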
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
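
/**
 * amdgpu_ctx_mgr_fini - tear down a per-file context manager
 * @mgr: manager embedded in the amdgpu file private
 *
 * Drops the manager's reference on every context still registered,
 * warning about any that other code still holds references to
 * (kref_put() returns 1 only when the object was actually released).
 */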
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}