@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -102,24 +100,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	ring = &adev->vcn.ring_dec;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN dec run queue.\n");
-		return r;
-	}
-
-	ring = &adev->vcn.ring_enc[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN enc run queue.\n");
-		return r;
-	}
-
 	return 0;
 }
 
@@ -129,10 +109,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
 	kfree(adev->vcn.saved_bo);
 
-	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
-
-	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
-
 	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
 			      &adev->vcn.gpu_addr,
 			      (void **)&adev->vcn.cpu_addr);
@@ -278,7 +254,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-				   struct amdgpu_bo *bo, bool direct,
+				   struct amdgpu_bo *bo,
 				   struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -306,19 +282,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 	ib->length_dw = 16;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err_free;
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	job->fence = dma_fence_get(f);
+	if (r)
+		goto err_free;
 
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err_free;
-	}
+	amdgpu_job_free(job);
 
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
@@ -370,11 +339,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = 14; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-				       bool direct, struct dma_fence **fence)
+				       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo = NULL;
@@ -396,7 +365,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = 6; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -410,7 +379,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
+	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
 		goto error;