@@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 	struct v3d_job *job = to_v3d_job(sched_job);
 	struct v3d_exec_info *exec = job->exec;
 	struct v3d_dev *v3d = exec->v3d;
+	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
 	enum v3d_queue q;
+	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+	/* If the current address or return address have changed, then
+	 * the GPU has probably made progress and we should delay the
+	 * reset.  This could fail if the GPU got in an infinite loop
+	 * in the CL, but that is pretty unlikely outside of an i-g-t
+	 * testcase.
+	 */
+	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+		job->timedout_ctca = ctca;
+		job->timedout_ctra = ctra;
+
+		schedule_delayed_work(&job->base.work_tdr,
+				      job->base.sched->timeout);
+		return;
+	}
 
 	mutex_lock(&v3d->reset_lock);
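The comparison above relies on timedout_ctca/timedout_ctra members of struct v3d_job that are not part of this hunk; presumably they are added to the job struct elsewhere in the same patch. As a rough, compilable sketch of the progress-check idea in isolation (illustrative names only, not the driver's own code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-job bookkeeping mirroring the timedout_ctca/ctra
 * fields the hunk relies on; the names here are illustrative.
 */
struct progress_state {
	uint32_t last_ctca;	/* last observed CL current address */
	uint32_t last_ctra;	/* last observed CL return address */
};

/* Returns true if the job deserves more time: the execute addresses
 * moved since the previous timeout, so the GPU is still making
 * progress and the reset should be postponed.
 */
static bool job_made_progress(struct progress_state *st,
			      uint32_t ctca, uint32_t ctra)
{
	if (st->last_ctca == ctca && st->last_ctra == ctra)
		return false;	/* stuck at the same addresses: reset */

	st->last_ctca = ctca;	/* remember the new positions for the */
	st->last_ctra = ctra;	/* next timeout callback */
	return true;
}

In the hunk itself, rescheduling the drm_sched timeout work via schedule_delayed_work() on work_tdr is what actually defers the reset when this kind of check indicates progress.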