@@ -2684,7 +2684,6 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		queue_mask |= (1ull << i);
 	}
 
-	kiq_ring->ready = true;
 	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
 	if (r) {
 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
@@ -3091,26 +3090,33 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = NULL;
-	int r = 0, i;
-
-	gfx_v9_0_cp_compute_enable(adev, true);
+	struct amdgpu_ring *ring;
+	int r;
 
 	ring = &adev->gfx.kiq.ring;
 
 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
 	if (unlikely(r != 0))
-		goto done;
+		return r;
 
 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-	if (!r) {
-		r = gfx_v9_0_kiq_init_queue(ring);
-		amdgpu_bo_kunmap(ring->mqd_obj);
-		ring->mqd_ptr = NULL;
-	}
+	if (unlikely(r != 0))
+		return r;
+
+	gfx_v9_0_kiq_init_queue(ring);
+	amdgpu_bo_kunmap(ring->mqd_obj);
+	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	if (r)
-		goto done;
+	ring->ready = true;
+	return 0;
+}
+
+static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = NULL;
+	int r = 0, i;
+
+	gfx_v9_0_cp_compute_enable(adev, true);
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
@@ -3153,11 +3159,15 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
 			return r;
 	}
 
+	r = gfx_v9_0_kiq_resume(adev);
+	if (r)
+		return r;
+
 	r = gfx_v9_0_cp_gfx_resume(adev);
 	if (r)
 		return r;
 
-	r = gfx_v9_0_kiq_resume(adev);
+	r = gfx_v9_0_kcq_resume(adev);
 	if (r)
 		return r;
 