@@ -2827,163 +2827,172 @@ err:
 	return r;
 }
 
-/**
- * amdgpu_sriov_gpu_reset - reset the asic
+/*
+ * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
  *
  * @adev: amdgpu device pointer
- * @job: which job trigger hang
+ * @reset_flags: output param that tells the caller the reset result
  *
- * Attempt the reset the GPU if it has hung (all asics).
- * for SRIOV case.
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
+ * Attempt a soft reset, falling back to a full reset, and reinitialize the ASIC.
+ * Returns 0 on success, otherwise a negative error code.
+ */
+static int amdgpu_reset(struct amdgpu_device *adev, uint64_t *reset_flags)
 {
-	int i, j, r = 0;
-	int resched;
-	struct amdgpu_bo *bo, *tmp;
-	struct amdgpu_ring *ring;
-	struct dma_fence *fence = NULL, *next = NULL;
+	bool need_full_reset, vram_lost = false;
+	int r;
 
-	mutex_lock(&adev->virt.lock_reset);
-	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_sriov_reset = true;
+	need_full_reset = amdgpu_need_full_reset(adev);
 
-	/* block TTM */
-	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	if (!need_full_reset) {
+		amdgpu_pre_soft_reset(adev);
+		r = amdgpu_soft_reset(adev);
+		amdgpu_post_soft_reset(adev);
+		if (r || amdgpu_check_soft_reset(adev)) {
+			DRM_INFO("soft reset failed, will fallback to full reset!\n");
+			need_full_reset = true;
+		}
 
-	/* we start from the ring trigger GPU hang */
-	j = job ? job->ring->idx : 0;
+	}
 
-	/* block scheduler */
-	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
-		ring = adev->rings[i % AMDGPU_MAX_RINGS];
-		if (!ring || !ring->sched.thread)
-			continue;
+	if (need_full_reset) {
+		r = amdgpu_suspend(adev);
 
-		kthread_park(ring->sched.thread);
+retry:
+		amdgpu_atombios_scratch_regs_save(adev);
+		r = amdgpu_asic_reset(adev);
+		amdgpu_atombios_scratch_regs_restore(adev);
+		/* post card */
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
-		if (job && j != i)
-			continue;
+		if (!r) {
+			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+			r = amdgpu_resume_phase1(adev);
+			if (r)
+				goto out;
 
-		/* here give the last chance to check if job removed from mirror-list
-		 * since we already pay some time on kthread_park */
-		if (job && list_empty(&job->base.node)) {
-			kthread_unpark(ring->sched.thread);
-			goto give_up_reset;
+			vram_lost = amdgpu_check_vram_lost(adev);
+			if (vram_lost) {
+				DRM_ERROR("VRAM is lost!\n");
+				atomic_inc(&adev->vram_lost_counter);
+			}
+
+			r = amdgpu_ttm_recover_gart(adev);
+			if (r)
+				goto out;
+
+			r = amdgpu_resume_phase2(adev);
+			if (r)
+				goto out;
+
+			if (vram_lost)
+				amdgpu_fill_reset_magic(adev);
 		}
+	}
 
-		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
-			amd_sched_job_kickout(&job->base);
+out:
+	if (!r) {
+		amdgpu_irq_gpu_reset_resume_helper(adev);
+		r = amdgpu_ib_ring_tests(adev);
+		if (r) {
+			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+			r = amdgpu_suspend(adev);
+			need_full_reset = true;
+			goto retry;
+		}
+	}
 
-		/* only do job_reset on the hang ring if @job not NULL */
-		amd_sched_hw_job_reset(&ring->sched, NULL);
+	if (reset_flags) {
+		if (vram_lost)
+			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
 
-		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-		amdgpu_fence_driver_force_completion(ring);
+		if (need_full_reset)
+			(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
 	}
 
-	/* request to take full control of GPU before re-initialization */
-	if (job)
-		amdgpu_virt_reset_gpu(adev);
-	else
-		amdgpu_virt_request_full_gpu(adev, true);
+	return r;
+}
+
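
The reset_flags out-parameter is a plain bitmask. For reference, a minimal sketch of the two bits set above, assuming they are defined alongside the driver's other reset constants (the exact values are illustrative; only their distinctness matters):

/* assumed definitions (illustrative values) */
#define AMDGPU_RESET_INFO_VRAM_LOST	(1ULL << 0)	/* VRAM content was lost */
#define AMDGPU_RESET_INFO_FULLRESET	(1ULL << 1)	/* a full ASIC reset was performed */
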
+/*
+ * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu device pointer
+ * @reset_flags: output param that tells the caller the reset result
+ *
+ * Perform a VF FLR and reinitialize the ASIC.
+ * Returns 0 on success, otherwise a negative error code.
+ */
+static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
+{
+	int r;
+
+	if (from_hypervisor)
+		r = amdgpu_virt_request_full_gpu(adev, true);
+	else
+		r = amdgpu_virt_reset_gpu(adev);
+	if (r)
+		return r;
 
 	/* Resume IP prior to SMC */
-	amdgpu_sriov_reinit_early(adev);
+	r = amdgpu_sriov_reinit_early(adev);
+	if (r)
+		goto error;
 
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
 	amdgpu_ttm_recover_gart(adev);
 
 	/* now we are okay to resume SMC/CP/SDMA */
-	amdgpu_sriov_reinit_late(adev);
+	r = amdgpu_sriov_reinit_late(adev);
+	if (r)
+		goto error;
 
 	amdgpu_irq_gpu_reset_resume_helper(adev);
-
-	if (amdgpu_ib_ring_tests(adev))
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
 		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
 
+error:
 	/* release full control of GPU after ib test */
 	amdgpu_virt_release_full_gpu(adev, true);
 
-	DRM_INFO("recover vram bo from shadow\n");
-
-	ring = adev->mman.buffer_funcs_ring;
-	mutex_lock(&adev->shadow_list_lock);
-	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
-		next = NULL;
-		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
-		if (fence) {
-			r = dma_fence_wait(fence, false);
-			if (r) {
-				WARN(r, "recovery from shadow isn't completed\n");
-				break;
-			}
-		}
-
-		dma_fence_put(fence);
-		fence = next;
-	}
-	mutex_unlock(&adev->shadow_list_lock);
-
-	if (fence) {
-		r = dma_fence_wait(fence, false);
-		if (r)
-			WARN(r, "recovery from shadow isn't completed\n");
-	}
-	dma_fence_put(fence);
-
-	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
-		ring = adev->rings[i % AMDGPU_MAX_RINGS];
-		if (!ring || !ring->sched.thread)
-			continue;
-
-		if (job && j != i) {
-			kthread_unpark(ring->sched.thread);
-			continue;
-		}
-
-		amd_sched_job_recovery(&ring->sched);
-		kthread_unpark(ring->sched.thread);
-	}
+	if (reset_flags) {
+		/* will get vram_lost from GIM in the future; for now every
+		 * reset request is considered VRAM lost
+		 */
+		(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+		atomic_inc(&adev->vram_lost_counter);
+
-	drm_helper_resume_force_mode(adev->ddev);
-give_up_reset:
-	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-	if (r) {
-		/* bad news, how to tell it to userspace ? */
-		dev_info(adev->dev, "GPU reset failed\n");
-	} else {
-		dev_info(adev->dev, "GPU reset successed!\n");
+		/* VF FLR or hotlink reset is always a full reset */
+		(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
 	}
 
-	adev->in_sriov_reset = false;
-	mutex_unlock(&adev->virt.lock_reset);
 	return r;
 }
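
For the SR-IOV back-end, from_hypervisor distinguishes a host-initiated FLR from a hang detected by the guest itself. A hypothetical mailbox FLR handler (the work-struct wiring below is an assumption for illustration, not part of this patch) would recover with job = NULL, which the front-end translates into from_hypervisor = true:

/* hypothetical FLR work handler (sketch) */
static void xgpu_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* no guest job triggered this, so recover all rings */
	amdgpu_gpu_recover(adev, NULL);
}
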
 
 /**
- * amdgpu_gpu_reset - reset the asic
+ * amdgpu_gpu_recover - reset the asic and recover scheduler
  *
  * @adev: amdgpu device pointer
+ * @job: which job triggered the hang
  *
- * Attempt the reset the GPU if it has hung (all asics).
+ * Attempt to reset the GPU if it has hung (all asics).
  * Returns 0 for success or an error on failure.
  */
-int amdgpu_gpu_reset(struct amdgpu_device *adev)
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
 {
 	struct drm_atomic_state *state = NULL;
-	int i, r;
-	int resched;
-	bool need_full_reset, vram_lost = false;
+	uint64_t reset_flags = 0;
+	int i, r, resched;
 
 	if (!amdgpu_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
 		return 0;
 	}
 
+	dev_info(adev->dev, "GPU reset begin!\n");
+
+	mutex_lock(&adev->virt.lock_reset);
 	atomic_inc(&adev->gpu_reset_counter);
+	adev->in_sriov_reset = true;
 
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
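
To summarize the restructuring so far: the old monolithic reset is split into one front-end and two back-ends, and the next hunk wires up the dispatch. The resulting call graph, using only names introduced by this patch:

/*
 * amdgpu_gpu_recover()          front-end: serialize, park schedulers, dispatch
 *   -> amdgpu_reset_sriov()     SR-IOV VF: FLR plus IP reinitialization
 *   -> amdgpu_reset()           bare-metal/passthrough: soft reset, else full reset
 */
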
@@ -2997,69 +3006,26 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
 		if (!ring || !ring->sched.thread)
 			continue;
+
+		/* only focus on the ring that hit the timeout if @job is not NULL */
+		if (job && job->ring->idx != i)
+			continue;
+
 		kthread_park(ring->sched.thread);
-		amd_sched_hw_job_reset(&ring->sched, NULL);
+		amd_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
 
-	need_full_reset = amdgpu_need_full_reset(adev);
-
-	if (!need_full_reset) {
-		amdgpu_pre_soft_reset(adev);
-		r = amdgpu_soft_reset(adev);
-		amdgpu_post_soft_reset(adev);
-		if (r || amdgpu_check_soft_reset(adev)) {
-			DRM_INFO("soft reset failed, will fallback to full reset!\n");
-			need_full_reset = true;
-		}
-	}
-
-	if (need_full_reset) {
-		r = amdgpu_suspend(adev);
-
-retry:
-		amdgpu_atombios_scratch_regs_save(adev);
-		r = amdgpu_asic_reset(adev);
-		amdgpu_atombios_scratch_regs_restore(adev);
-		/* post card */
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (amdgpu_sriov_vf(adev))
+		r = amdgpu_reset_sriov(adev, &reset_flags, !job);
+	else
+		r = amdgpu_reset(adev, &reset_flags);
 
-		if (!r) {
-			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-			r = amdgpu_resume_phase1(adev);
-			if (r)
-				goto out;
-			vram_lost = amdgpu_check_vram_lost(adev);
-			if (vram_lost) {
-				DRM_ERROR("VRAM is lost!\n");
-				atomic_inc(&adev->vram_lost_counter);
-			}
-			r = amdgpu_ttm_recover_gart(adev);
-			if (r)
-				goto out;
-			r = amdgpu_resume_phase2(adev);
-			if (r)
-				goto out;
-			if (vram_lost)
-				amdgpu_fill_reset_magic(adev);
-		}
-	}
-out:
 	if (!r) {
-		amdgpu_irq_gpu_reset_resume_helper(adev);
-		r = amdgpu_ib_ring_tests(adev);
-		if (r) {
-			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-			r = amdgpu_suspend(adev);
-			need_full_reset = true;
-			goto retry;
-		}
-		/**
-		 * recovery vm page tables, since we cannot depend on VRAM is
-		 * consistent after gpu full reset.
-		 */
-		if (need_full_reset && amdgpu_need_backup(adev)) {
+		if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
+			(reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
 			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 			struct amdgpu_bo *bo, *tmp;
 			struct dma_fence *fence = NULL, *next = NULL;
@@ -3088,40 +3054,56 @@ out:
 			}
 			dma_fence_put(fence);
 		}
+
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
 
 			if (!ring || !ring->sched.thread)
 				continue;
 
+			/* only focus on the ring that hit the timeout if @job is not NULL */
+			if (job && job->ring->idx != i)
+				continue;
+
 			amd_sched_job_recovery(&ring->sched);
 			kthread_unpark(ring->sched.thread);
 		}
 	} else {
-		dev_err(adev->dev, "asic resume failed (%d).\n", r);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			if (adev->rings[i] && adev->rings[i]->sched.thread) {
-				kthread_unpark(adev->rings[i]->sched.thread);
-			}
+			struct amdgpu_ring *ring = adev->rings[i];
+
+			if (!ring || !ring->sched.thread)
+				continue;
+
+			/* only focus on the ring that hit the timeout if @job is not NULL */
+			if (job && job->ring->idx != i)
+				continue;
+
+			kthread_unpark(adev->rings[i]->sched.thread);
 		}
 	}
 
 	if (amdgpu_device_has_dc_support(adev)) {
-		r = drm_atomic_helper_resume(adev->ddev, state);
+		if (drm_atomic_helper_resume(adev->ddev, state))
+			dev_info(adev->dev, "drm resume failed\n");
 		amdgpu_dm_display_resume(adev);
-	} else
+	} else {
 		drm_helper_resume_force_mode(adev->ddev);
+	}
 
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
-		dev_info(adev->dev, "GPU reset failed\n");
-	}
-	else {
-		dev_info(adev->dev, "GPU reset successed!\n");
+		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+	} else {
+		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
 	}
 
 	amdgpu_vf_error_trans_all(adev);
+	adev->in_sriov_reset = false;
+	mutex_unlock(&adev->virt.lock_reset);
 	return r;
 }
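
On the detection side, the expected caller of the new entry point is the GPU scheduler's timeout callback, which hands over the offending job. A minimal sketch, assuming the driver's usual container_of() pattern (the handler body is illustrative, not part of this diff):

/* sketch: scheduler timeout callback feeding amdgpu_gpu_recover() */
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	DRM_ERROR("ring %s timeout\n", job->ring->name);

	/* a non-NULL @job limits reset and recovery to the hung ring */
	amdgpu_gpu_recover(job->adev, job);
}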