@@ -124,7 +124,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
-	uint64_t ctx;
+	uint64_t fence_ctx;
 
 	unsigned i;
 	int r = 0;
@@ -135,10 +135,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* ring tests don't use a job */
 	if (job) {
 		vm = job->vm;
-		ctx = job->ctx;
+		fence_ctx = job->fence_ctx;
 	} else {
 		vm = NULL;
-		ctx = 0;
+		fence_ctx = 0;
 	}
 
 	if (!ring->ready) {
@@ -174,8 +174,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* always set cond_exec_polling to CONTINUE */
 	*ring->cond_exe_cpu_addr = 1;
 
-	skip_preamble = ring->current_ctx == ctx;
-	need_ctx_switch = ring->current_ctx != ctx;
+	skip_preamble = ring->current_ctx == fence_ctx;
+	need_ctx_switch = ring->current_ctx != fence_ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
 
@@ -209,7 +209,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-	ring->current_ctx = ctx;
+	ring->current_ctx = fence_ctx;
 	if (ring->funcs->emit_switch_buffer)
 		amdgpu_ring_emit_switch_buffer(ring);
 	amdgpu_ring_commit(ring);