@@ -5111,6 +5111,63 @@ static int gfx_v8_0_check_soft_reset(void *handle)
 	return 0;
 }
 
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring)
+{
+	int i;
+
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+		u32 tmp;
+		tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+				    DEQUEUE_REQ, 2);
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+		return 0;
+
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+	/* stop the rlc */
+	gfx_v8_0_rlc_stop(adev);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+		/* Disable GFX parsing/prefetching */
+		gfx_v8_0_cp_gfx_enable(adev, false);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+		int i;
+
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+			gfx_v8_0_inactive_hqd(adev, ring);
+		}
+		/* Disable MEC parsing/prefetching */
+		gfx_v8_0_cp_compute_enable(adev, false);
+	}
+
+	return 0;
+}
+
 static int gfx_v8_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6357,6 +6414,7 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.is_idle = gfx_v8_0_is_idle,
 	.wait_for_idle = gfx_v8_0_wait_for_idle,
 	.check_soft_reset = gfx_v8_0_check_soft_reset,
+	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
 	.soft_reset = gfx_v8_0_soft_reset,
 	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
 	.set_powergating_state = gfx_v8_0_set_powergating_state,