@@ -1896,6 +1896,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
 	atomic_inc(&adev->gpu_reset_counter);
 
+	/* evict vram memory */
+	amdgpu_bo_evict_vram(adev);
+
 	/* block scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -1904,6 +1907,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 			continue;
 		kthread_park(ring->sched.thread);
 	}
+
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 