@@ -666,7 +666,10 @@ static int gfx_v9_0_compute_mqd_sw_init(struct amdgpu_device *adev)
 			return r;
 		}
 
-		/*TODO: prepare MQD backup */
+		/* prepare MQD backup */
+		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
+		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
+			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
 	}
 
 	/* create MQD for each KCQ */
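Note that the KIQ ring's backup is stored one slot past the per-KCQ entries, at index AMDGPU_MAX_COMPUTE_RINGS, which presumes the mqd_backup array is declared with AMDGPU_MAX_COMPUTE_RINGS + 1 slots. A minimal sketch of that indexing convention (the constant's value below is illustrative, not the driver's definition):

	#define AMDGPU_MAX_COMPUTE_RINGS 8	/* illustrative value only */

	struct mec_backup_layout_sketch {
		/* slots 0..AMDGPU_MAX_COMPUTE_RINGS-1 hold the KCQ backups, */
		/* slot AMDGPU_MAX_COMPUTE_RINGS holds the KIQ backup        */
		void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
	};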
@@ -681,7 +684,10 @@ static int gfx_v9_0_compute_mqd_sw_init(struct amdgpu_device *adev)
 				return r;
 			}
 
-			/* TODO: prepare MQD backup */
+			/* prepare MQD backup */
+			adev->gfx.mec.mqd_backup[i] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
+			if (!adev->gfx.mec.mqd_backup[i])
+				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
 		}
 	}
 
@@ -695,10 +701,12 @@ static void gfx_v9_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
+		kfree(adev->gfx.mec.mqd_backup[i]);
 		amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
 	}
 
 	ring = &adev->gfx.kiq.ring;
+	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
 	amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
 }
 
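Together with the sw_init hunks above this gives a simple lifecycle: each ring's backup buffer is kmalloc'ed at init and kfree'd at fini. An allocation failure only produces a dev_warn() and is not treated as fatal, which works because every consumer of mqd_backup checks the pointer for NULL first, and kfree(NULL) is a no-op so the fini path can free unconditionally. A stand-alone sketch of that pattern, with userspace malloc/free standing in for kmalloc/kfree and illustrative sizes:

	#include <stdio.h>
	#include <stdlib.h>

	#define NUM_RINGS 4	/* stands in for adev->gfx.num_compute_rings */
	#define MQD_SIZE  512	/* stands in for sizeof(struct v9_mqd)       */

	static void *mqd_backup[NUM_RINGS + 1];	/* last slot plays the KIQ role */

	static void backup_sw_init(void)
	{
		for (int i = 0; i <= NUM_RINGS; i++) {
			mqd_backup[i] = malloc(MQD_SIZE);
			if (!mqd_backup[i])
				fprintf(stderr, "no memory for MQD backup %d\n", i);
			/* not fatal: every user checks for NULL before dereferencing */
		}
	}

	static void backup_sw_fini(void)
	{
		/* free(NULL) is a no-op, so failed allocations need no special case */
		for (int i = 0; i <= NUM_RINGS; i++)
			free(mqd_backup[i]);
	}

	int main(void)
	{
		backup_sw_init();
		backup_sw_fini();
		return 0;
	}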
@@ -2132,8 +2140,12 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 		soc15_grbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
+		if (adev->gfx.mec.mqd_backup[mqd_idx])
+			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
 	} else { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
+		if (adev->gfx.mec.mqd_backup[mqd_idx])
+			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
 
 		/* reset ring buffer */
 		ring->wptr = 0;
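The last hunk uses the backup in both directions: after the first full MQD initialization the live MQD image is copied into the backup, and on a GPU reset the backup is copied back over the live MQD so the queue can be brought up again from a known-good state instead of redoing the full init. A minimal stand-alone sketch of that save/restore flow (struct and function names here are illustrative, not the driver's):

	#include <stdbool.h>
	#include <string.h>

	struct mqd_sketch {		/* stands in for struct v9_mqd */
		unsigned int wptr;
		unsigned int state[16];
	};

	static struct mqd_sketch live_mqd;	/* MQD image the hardware consumes */
	static struct mqd_sketch backup_buf;
	static struct mqd_sketch *backup = &backup_buf;	/* would be NULL if kmalloc had failed */

	static void kiq_init_queue_sketch(bool in_gpu_reset)
	{
		if (!in_gpu_reset) {
			/* first init: program live_mqd, then snapshot it */
			if (backup)
				memcpy(backup, &live_mqd, sizeof(live_mqd));
		} else {
			/* GPU reset: restore the known-good MQD image ... */
			if (backup)
				memcpy(&live_mqd, backup, sizeof(live_mqd));
			/* ... then reset ring state such as the write pointer */
			live_mqd.wptr = 0;
		}
	}

	int main(void)
	{
		kiq_init_queue_sketch(false);	/* first bring-up: take a backup     */
		kiq_init_queue_sketch(true);	/* after a reset: restore the backup */
		return 0;
	}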