|
@@ -713,7 +713,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
|
|
|
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
|
|
|
|
|
|
ring->ready = true;
|
|
|
+ }
|
|
|
|
|
|
+ /* unhalt the MEs */
|
|
|
+ sdma_v3_0_enable(adev, true);
|
|
|
+ /* enable sdma ring preemption */
|
|
|
+ sdma_v3_0_ctx_switch_enable(adev, true);
|
|
|
+
|
|
|
+ for (i = 0; i < adev->sdma.num_instances; i++) {
|
|
|
+ ring = &adev->sdma.instance[i].ring;
|
|
|
r = amdgpu_ring_test_ring(ring);
|
|
|
if (r) {
|
|
|
ring->ready = false;
|
|
@@ -806,10 +814,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /* unhalt the MEs */
|
|
|
- sdma_v3_0_enable(adev, true);
|
|
|
- /* enable sdma ring preemption */
|
|
|
- sdma_v3_0_ctx_switch_enable(adev, true);
|
|
|
+ /* disable sdma engine before programming it */
|
|
|
+ sdma_v3_0_ctx_switch_enable(adev, false);
|
|
|
+ sdma_v3_0_enable(adev, false);
|
|
|
|
|
|
/* start the gfx rings and rlc compute queues */
|
|
|
r = sdma_v3_0_gfx_resume(adev);
|