@@ -34,13 +34,14 @@ static void amdgpu_job_free_handler(struct work_struct *ws)
 	amd_sched_job_put(&job->base);
 }
 
-void amdgpu_job_timeout_func(struct work_struct *work)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
+	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+
 	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-		  job->base.sched->name,
-		  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
-		  job->ring->fence_drv.sync_seq);
+		  job->base.sched->name,
+		  atomic_read(&job->ring->fence_drv.last_seq),
+		  job->ring->fence_drv.sync_seq);
 
 	amd_sched_job_put(&job->base);
 }
@@ -126,8 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		return -EINVAL;
 
 	r = amd_sched_job_init(&job->base, &ring->sched,
-			       entity, amdgpu_job_timeout_func,
-			       amdgpu_job_free_func, owner, &fence);
+			       entity, amdgpu_job_free_func, owner, &fence);
 	if (r)
 		return r;
 
@@ -197,4 +197,5 @@ err:
 const struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_job_dependency,
 	.run_job = amdgpu_job_run,
+	.timedout_job = amdgpu_job_timedout,
 };
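
The structural change in these hunks is that timeout handling moves out of a driver-owned work handler (amdgpu_job_timeout_func) and into a callback the scheduler invokes through amd_sched_backend_ops, which is also why amd_sched_job_init in the second hunk no longer takes a timeout function. As a rough, self-contained C sketch of that callback pattern only (the *_sketch types and sched_timeout_fired below are illustrative stand-ins, not the kernel definitions):

#include <stdio.h>

/* Illustrative stand-ins only; not the kernel's definitions. */
struct sched_job_sketch {
	const char *ring_name;
};

struct sched_backend_ops_sketch {
	void (*timedout_job)(struct sched_job_sketch *s_job);
};

/* Driver-side hook, analogous in shape to amdgpu_job_timedout() above. */
static void job_timedout(struct sched_job_sketch *s_job)
{
	fprintf(stderr, "ring %s timeout\n", s_job->ring_name);
}

static const struct sched_backend_ops_sketch ops_sketch = {
	.timedout_job = job_timedout,
};

/* Scheduler-side dispatch: when the scheduler's own timeout fires it calls
 * the registered hook, so the driver no longer installs a work handler. */
static void sched_timeout_fired(const struct sched_backend_ops_sketch *ops,
				struct sched_job_sketch *s_job)
{
	if (ops->timedout_job)
		ops->timedout_job(s_job);
}

int main(void)
{
	struct sched_job_sketch job = { .ring_name = "gfx" };

	sched_timeout_fired(&ops_sketch, &job);
	return 0;
}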