@@ -86,7 +86,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	return r;
 }
 
-void amdgpu_job_free(struct amdgpu_job *job)
+static void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
 	struct fence *f;
 	unsigned i;
@@ -100,9 +100,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
 
 	amdgpu_bo_unref(&job->uf_bo);
 	amdgpu_sync_free(&job->sync);
-
-	if (!job->base.use_sched)
-		kfree(job);
 }
 
 void amdgpu_job_free_func(struct kref *refcount)
@@ -111,6 +108,12 @@ void amdgpu_job_free_func(struct kref *refcount)
 	kfree(job);
 }
 
+void amdgpu_job_free(struct amdgpu_job *job)
+{
+	amdgpu_job_free_resources(job);
+	kfree(job);
+}
+
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f)
@@ -187,7 +190,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 
 err:
 	job->fence = fence;
-	amdgpu_job_free(job);
+	amdgpu_job_free_resources(job);
 	return fence;
 }
 
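Not part of the patch itself: the change above separates releasing what a job references (amdgpu_job_free_resources) from freeing the job struct, which now happens either immediately in the new amdgpu_job_free() or later in amdgpu_job_free_func() when the last reference is dropped; the scheduler's run path only releases resources. The toy program below is a minimal, self-contained C sketch of that ownership split. struct toy_job and its helpers are illustrative stand-ins, not the real amdgpu definitions.

/*
 * Toy model of the two cleanup paths introduced by the patch.
 * Not driver code; plain userspace C for illustration only.
 */
#include <stdlib.h>
#include <stdio.h>

struct toy_job {
	int refcount;      /* stands in for the kref on the job            */
	void *ib_memory;   /* stands in for IBs, uf_bo and the sync object */
};

/* Counterpart of amdgpu_job_free_resources(): drop what the job references,
 * but leave the job object alive for whoever still holds a reference. */
static void toy_job_free_resources(struct toy_job *job)
{
	free(job->ib_memory);
	job->ib_memory = NULL;
}

/* Counterpart of the new amdgpu_job_free(): for paths that own the job
 * outright, resources and the object are released together. */
static void toy_job_free(struct toy_job *job)
{
	toy_job_free_resources(job);
	free(job);
}

/* Counterpart of amdgpu_job_free_func(): only the object is freed here,
 * because the run path already released the resources. */
static void toy_job_put(struct toy_job *job)
{
	if (--job->refcount == 0)
		free(job);
}

int main(void)
{
	/* Direct path: allocate, then free everything at once. */
	struct toy_job *a = calloc(1, sizeof(*a));
	a->ib_memory = malloc(64);
	toy_job_free(a);

	/* Scheduler-like path: resources go away after the job has run,
	 * the object itself only when the last reference is dropped. */
	struct toy_job *b = calloc(1, sizeof(*b));
	b->refcount = 1;
	b->ib_memory = malloc(64);
	toy_job_free_resources(b);
	toy_job_put(b);

	puts("done");
	return 0;
}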