@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return r;
 	}
 
+	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (ring->funcs->emit_pipeline_sync && job &&
 	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		need_pipe_sync = true;
 		dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	}
 
 	skip_preamble = ring->current_ctx == fence_ctx;
-	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (job && ring->funcs->emit_cntxcntl) {
 		if (need_ctx_switch)
 			status |= AMDGPU_HAVE_CTX_SWITCH;
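
For reference, a sketch of how the affected part of amdgpu_ib_schedule() reads
once both hunks are applied (reconstructed from the hunks above, not a verbatim
copy of amdgpu_ib.c; the elided lines between the hunks are unchanged). The net
effect visible in the diff: need_ctx_switch is computed before the pipeline-sync
check instead of after it, so that on SR-IOV virtual functions a context switch
can force a pipeline sync even when neither the scheduler fence nor the VM
requires one.

	/* Moved up: the pipeline-sync condition below now consumes this. */
	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||	/* new SR-IOV case */
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;
		dma_fence_put(tmp);
	}

	...

	skip_preamble = ring->current_ctx == fence_ctx;
	/* The need_ctx_switch assignment formerly here is gone; the value
	 * computed above still feeds the context-switch status flag. */
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;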