
drm/amdgpu/gfx8: fix driver reload with KIQ

Drop the HQD deactivation in KIQ init and drop the KCQ disabling
via the KIQ.  We disable the MEC shortly afterwards anyway, so there
is no need to wait for all of this, and going through the KIQ for it
seems to leave the MEC in a bad way.

Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
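
For context, the change relies on the teardown order in gfx_v8_0_hw_fini(): the MEC is halted via gfx_v8_0_cp_enable(adev, false) immediately after the point where the KIQ-based KCQ disable used to run, so the UNMAP_QUEUES round trip buys nothing. Below is a minimal sketch of the resulting bare-metal teardown path; the elided lines and the note about CP_MEC_CNTL are paraphrased from the surrounding driver code, not quoted from this patch.

static int gfx_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* ... interrupt teardown elided ... */

	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/*
	 * No gfx_v8_0_kiq_kcq_disable() round trip through the KIQ any more:
	 * the compute queues do not need to be unmapped one by one, because
	 * the MEC microengines that run them are halted right below
	 * (gfx_v8_0_cp_enable(false) ends up setting the halt bits in
	 * CP_MEC_CNTL).
	 */
	gfx_v8_0_cp_enable(adev, false);
	gfx_v8_0_rlc_stop(adev);

	/* ... powergating/clockgating handling elided ... */

	return 0;
}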
Alex Deucher, 8 years ago
Commit dcf75843c0
1 changed file, 1 insertion(+), 68 deletions(-)

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c (+1, -68)

@@ -4636,56 +4636,6 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 	return r;
 }
 
-static int gfx_v8_0_kiq_kcq_disable(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-	uint32_t scratch, tmp = 0;
-	int r, i;
-
-	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-
-	r = amdgpu_ring_alloc(kiq_ring, 6 + 3);
-	if (r) {
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
-	}
-	/* unmap queues */
-	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
-	amdgpu_ring_write(kiq_ring,
-			  PACKET3_UNMAP_QUEUES_ACTION(1)| /* RESET_QUEUES */
-			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(2)); /* select all queues */
-	amdgpu_ring_write(kiq_ring, 0);
-	amdgpu_ring_write(kiq_ring, 0);
-	amdgpu_ring_write(kiq_ring, 0);
-	amdgpu_ring_write(kiq_ring, 0);
-	/* write to scratch for completion */
-	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
-	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
-	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
-	amdgpu_ring_commit(kiq_ring);
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF)
-			break;
-		DRM_UDELAY(1);
-	}
-	if (i >= adev->usec_timeout) {
-		DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	amdgpu_gfx_scratch_free(adev, scratch);
-
-	return r;
-}
-
 static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
 {
 	int i, r = 0;
@@ -4891,7 +4841,6 @@ int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
 
 static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 {
-	int r = 0;
 	struct amdgpu_device *adev = ring->adev;
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
@@ -4908,11 +4857,6 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 		amdgpu_ring_clear_ring(ring);
 		mutex_lock(&adev->srbm_mutex);
 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-		r = gfx_v8_0_deactivate_hqd(adev, 1);
-		if (r) {
-			dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
-			goto out_unlock;
-		}
 		gfx_v8_0_mqd_commit(adev, mqd);
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
@@ -4923,11 +4867,6 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 		mutex_lock(&adev->srbm_mutex);
 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 		gfx_v8_0_mqd_init(ring);
-		r = gfx_v8_0_deactivate_hqd(adev, 1);
-		if (r) {
-			dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
-			goto out_unlock;
-		}
 		gfx_v8_0_mqd_commit(adev, mqd);
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
@@ -4936,12 +4875,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
 	}
 
-	return r;
-
-out_unlock:
-	vi_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-	return r;
+	return 0;
 }
 
 static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
@@ -5145,7 +5079,6 @@ static int gfx_v8_0_hw_fini(void *handle)
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
 	}
-	gfx_v8_0_kiq_kcq_disable(adev);
 	gfx_v8_0_cp_enable(adev, false);
 	gfx_v8_0_rlc_stop(adev);
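
Pieced together from the context lines above, gfx_v8_0_kiq_init_queue() after this patch simply commits the MQD with no deactivate call and no error exit. The sketch below is assembled from the diff; the branch condition and the elided MQD-restore lines are paraphrased rather than quoted from the file.

static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	if (adev->gfx.in_reset) {	/* restore path (GPU reset / driver reload) */
		/* ... restore the saved MQD and reset the ring pointer (elided) ... */
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		/* the gfx_v8_0_deactivate_hqd() call and its error path are gone */
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {			/* first-time init */
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd,
			       sizeof(struct vi_mqd_allocation));
	}

	/* with nothing left to fail, the out_unlock error exit disappears too */
	return 0;
}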