@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
 		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
+	/* wrap the last IB with fence */
+	if (job && job->uf_addr) {
+		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+	}
+
 	r = amdgpu_fence_emit(ring, f, fence_flags);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ring->funcs->insert_end)
 		ring->funcs->insert_end(ring);
 
-	/* wrap the last IB with fence */
-	if (job && job->uf_addr) {
-		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
-	}
-
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
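Net effect of the two hunks: the user-fence write moves from after ring->funcs->insert_end() to before amdgpu_fence_emit(), so the user fence is placed in the ring ahead of the scheduler fence, presumably so it is already written by the time that fence signals. Below is a minimal, hypothetical C sketch of the resulting ordering; emit_user_fence(), emit_sched_fence(), and struct ib_job are illustrative stand-ins for the driver calls shown in the diff, not the real amdgpu API.

/*
 * Hypothetical stand-alone sketch of the fence emission order after this
 * patch; every type and helper here is a stand-in, not real driver code.
 */
#include <stdio.h>
#include <inttypes.h>

struct ib_job {
	uint64_t uf_addr;      /* user fence GPU address (0 = none) */
	uint32_t uf_sequence;  /* user fence sequence number */
};

/* stand-in for amdgpu_ring_emit_fence(): write the user fence packet */
static void emit_user_fence(const struct ib_job *job)
{
	printf("1: user fence  -> 0x%" PRIx64 ", seq %" PRIu32 "\n",
	       job->uf_addr, job->uf_sequence);
}

/* stand-in for amdgpu_fence_emit(): write the scheduler fence packet */
static int emit_sched_fence(void)
{
	printf("2: sched fence -> ring fence address\n");
	return 0;
}

int main(void)
{
	struct ib_job job = { .uf_addr = 0x1000, .uf_sequence = 42 };

	/* new order: the user fence goes into the ring first, ahead of
	 * the fence that the scheduler waits on */
	if (job.uf_addr)
		emit_user_fence(&job);
	return emit_sched_fence();
}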