amdgpu_ctx.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */
#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

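/*
 * amdgpu_ctx_priority_permit - check whether the caller may use a priority
 *
 * Priorities at or below NORMAL are open to everyone; anything higher
 * requires CAP_SYS_NICE or DRM master status on the file.
 */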
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

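/*
 * amdgpu_ctx_init - initialize a context with a given scheduler priority
 *
 * Allocates the per-ring fence slots, seeds the reset and VRAM-lost
 * counters, and creates a scheduler entity on every ring except the KIQ.
 */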
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

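/*
 * amdgpu_ctx_fini - tear down a context
 *
 * Drops every fence still stored in the per-ring slots, destroys the
 * scheduler entities and releases the queue manager.
 */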
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		drm_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);
}

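/*
 * amdgpu_ctx_alloc - create a context and publish it in the file's handle IDR
 *
 * The handle is allocated from the IDR first; if context initialization
 * then fails, the handle is removed again and the context freed.
 */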
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

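/* Final kref release callback: finalize the context and free its memory. */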
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

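/* Remove a context handle from the file's IDR and drop the reference it held. */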
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

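/*
 * amdgpu_ctx_query - legacy AMDGPU_CTX_OP_QUERY_STATE handler
 *
 * Reports whether a GPU reset happened since the previous query by
 * comparing the per-context query counter with the global reset counter.
 */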
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

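/*
 * amdgpu_ctx_query2 - AMDGPU_CTX_OP_QUERY_STATE2 handler
 *
 * Reports reset, VRAM-lost and guilty status as flags; the counters are
 * only compared, never updated, so the flags cover everything since the
 * context was created.
 */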
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

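/*
 * amdgpu_ctx_ioctl - dispatch the DRM_AMDGPU_CTX ioctl
 *
 * Translates the userspace priority (falling back to NORMAL for invalid
 * values) and routes the request to alloc, free or the two query paths.
 */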
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

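/* Look up a context by handle and take a reference; returns NULL if not found. */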
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

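/*
 * amdgpu_ctx_add_fence - store a fence in the context's per-ring slot array
 *
 * The slot index is the sequence number masked by amdgpu_sched_jobs; the
 * fence previously occupying the slot must already be signaled and is
 * dropped. The sequence number used is returned through @handler.
 */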
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);

	if (handler)
		*handler = seq;

	return 0;
}

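/*
 * amdgpu_ctx_get_fence - look up a previously added fence by sequence number
 *
 * A sequence of ~0ull means "the most recent submission". Returns
 * -EINVAL for sequence numbers from the future, NULL for ones old enough
 * to have been recycled, otherwise a new reference to the stored fence.
 */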
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

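/*
 * amdgpu_ctx_priority_override - move the context's entities to new run queues
 *
 * An UNSET override falls back to the priority the context was created
 * with; the KIQ ring is skipped, matching amdgpu_ctx_init() which creates
 * no entity for it.
 */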
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

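/*
 * amdgpu_ctx_wait_prev_fence - wait for the fence occupying the next slot
 *
 * Blocks until the fence that the next submission on @ring_id would
 * overwrite has signaled, so the slot array never wraps over an
 * unsignaled fence.
 */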
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0) {
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
			return r;
		}
	}

	return 0;
}

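/* Set up the per-file context manager: its lock and the handle IDR. */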
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

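/*
 * amdgpu_ctx_mgr_fini - tear down the per-file context manager
 *
 * Drops the manager's reference on every remaining context and complains
 * if something else still holds one.
 */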
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}