@@ -952,37 +952,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
 {
-	struct ttm_operation_ctx ctx = { true, false };
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
+	struct amdgpu_device *adev = ring->adev;
+	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct dma_fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t addr;
 	uint32_t data[4];
-	int i, r;
-
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
+	uint64_t addr;
+	long r;
+	int i;
 
-	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
-	if (r)
-		return r;
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
 
 	if (!ring->adev->uvd.address_64_bit) {
+		struct ttm_operation_ctx ctx = { true, false };
+
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		if (r)
+			goto err;
 	}
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (r)
-		goto err;
-
 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 	if (r)
 		goto err;
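[editor's note] The BO handed to amdgpu_uvd_send_msg() now arrives reserved, pinned and CPU-mapped from amdgpu_bo_create_reserved() (introduced in the later hunks), hence the kunmap/unpin right at the top: a pinned BO cannot be migrated, and on hardware without 64-bit UVD addressing ttm_bo_validate() may have to move it into the UVD-accessible VRAM segment. Note also that `r` is widened from `int` to `long` here, for the reason shown after the next hunk.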
@@ -1014,6 +1005,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+							true, false,
+							msecs_to_jiffies(10));
+		if (r == 0)
+			r = -ETIMEDOUT;
+		if (r < 0)
+			goto err_free;
+
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 		job->fence = dma_fence_get(f);
 		if (r)
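[editor's note] reservation_object_wait_timeout_rcu() returns a long -- the remaining jiffies on success, 0 on timeout, a negative errno on failure -- which is what forced `r` from `int` to `long` in the first hunk. A minimal sketch of the idiom, using only APIs that already appear in this patch (the helper name is hypothetical):

	static long uvd_wait_bo_idle(struct amdgpu_bo *bo)
	{
		long r;

		/* wait_all=true waits on shared and exclusive fences,
		 * intr=false, bounded at 10ms like the direct path above */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)	/* zero means the timeout elapsed */
			return -ETIMEDOUT;
		return r < 0 ? r : 0;
	}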
@@ -1021,17 +1020,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 		amdgpu_job_free(job);
 	} else {
+		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+		if (r)
+			goto err_free;
+
 		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
 	}
 
-	ttm_eu_fence_buffer_objects(&ticket, &head, f);
+	amdgpu_bo_fence(bo, f, false);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
-	amdgpu_bo_unref(&bo);
 	dma_fence_put(f);
 
 	return 0;
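[editor's note] The scheduled path cannot block the way the direct path does, so amdgpu_sync_resv() records the BO's current fences as dependencies of the job instead; the scheduler only runs it once they have signalled. On success both paths then attach the new fence to the BO before dropping it (the `false` argument means it is added as the exclusive fence, not a shared one), so the memory is not actually released while the engine may still touch it. Condensed, with calls taken from this patch:

	amdgpu_bo_fence(bo, f, false);	/* publish f as the BO's exclusive fence */
	amdgpu_bo_unreserve(bo);	/* drop the reservation taken at create time */
	amdgpu_bo_unref(&bo);		/* final put; TTM defers the free until f signals */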
@@ -1040,7 +1045,8 @@ err_free:
 	amdgpu_job_free(job);
 
 err:
-	ttm_eu_backoff_reservation(&ticket, &head);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -1051,31 +1057,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000000);
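[editor's note] amdgpu_bo_create_reserved() collapses the open-coded create/reserve/kmap sequence removed here (plus a pin) and hands the BO back still reserved, with `msg` already pointing at the CPU mapping. Roughly, and only as a sketch of the behaviour rather than the exact kernel implementation:

	/* approximately what amdgpu_bo_create_reserved(adev, size, align,
	 * domain, &bo, &gpu_addr, &cpu_addr) does; error unwinding omitted */
	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	r = amdgpu_bo_reserve(bo, false);
	r = amdgpu_bo_pin(bo, domain, &gpu_addr);
	r = amdgpu_bo_kmap(bo, &cpu_addr);
	/* no unreserve: the caller receives the BO locked */

This also explains the two hunks that follow: the manual kunmap/unreserve before amdgpu_uvd_send_msg() is gone because send_msg() now owns that cleanup.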
@@ -1091,9 +1082,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
@@ -1101,31 +1089,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000002);
@@ -1134,9 +1107,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
 }
 