@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
 		  ring->fence_drv.sync_seq);
 
-	amdgpu_device_gpu_recover(job->adev, job, false);
+	amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	if (!*job)
 		return -ENOMEM;
 
-	(*job)->adev = adev;
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
 	unsigned i;
 
@@ -93,7 +98,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_ib_free(job->adev, &job->ibs[i], f);
+		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -167,7 +172,8 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
 	if (fence && explicit) {
 		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+					      fence, false);
 			if (r)
 				DRM_ERROR("Error adding fence to sync (%d)\n", r);
 		}
@@ -190,7 +196,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
-	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
 	int r;
 
@@ -200,13 +205,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	}
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
-	adev = job->adev;
 
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 
-	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
 		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
 	if (finished->error < 0) {