@@ -49,7 +49,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct amdgpu_ctx *ctx)
 {
 	struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
-	unsigned i, j, num_sdma_rqs;
+	struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
+	unsigned i, j, num_sdma_rqs, num_comp_rqs;
 	int r;
 
 	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -82,6 +83,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
 	num_sdma_rqs = 0;
+	num_comp_rqs = 0;
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct drm_sched_rq *rq;
@@ -89,6 +91,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		rq = &ring->sched.sched_rq[priority];
 		if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
 			sdma_rqs[num_sdma_rqs++] = rq;
+		else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+			comp_rqs[num_comp_rqs++] = rq;
 	}
 
 	/* create context entity for each ring */
@@ -102,6 +106,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			r = drm_sched_entity_init(&ctx->rings[i].entity,
 						  sdma_rqs, num_sdma_rqs,
 						  &ctx->guilty);
+		} else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+			r = drm_sched_entity_init(&ctx->rings[i].entity,
+						  comp_rqs, num_comp_rqs,
+						  &ctx->guilty);
 		} else {
 			struct drm_sched_rq *rq;
 
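For reference, the compute path added here mirrors the existing SDMA handling: collect the run-queue of every ring of a given type at the context's priority, then hand the whole array to drm_sched_entity_init() so the scheduler can load-balance the entity across those rings. Below is a minimal sketch of that shared pattern as a hypothetical helper; init_entity_for_ring_type() is illustrative only (not part of this patch), and it assumes the four-argument drm_sched_entity_init() signature this series targets.

/*
 * Hypothetical helper illustrating the pattern used above for both
 * SDMA and compute rings: gather the per-ring run-queues for one ring
 * type at the requested priority, then initialize the entity with the
 * whole list so the GPU scheduler can pick among those rings.
 */
static int init_entity_for_ring_type(struct amdgpu_device *adev,
				     struct amdgpu_ctx *ctx,
				     enum amdgpu_ring_type type,
				     enum drm_sched_priority priority,
				     struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
	unsigned i, num_rqs = 0;

	/* Collect the run-queue of every ring matching the given type. */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == type)
			rqs[num_rqs++] = &ring->sched.sched_rq[priority];
	}

	/* Let the scheduler load-balance the entity across all of them. */
	return drm_sched_entity_init(entity, rqs, num_rqs, &ctx->guilty);
}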