
drm/v3d: Remove the bad signaled() implementation.

Since our seqno value comes from a counter associated with the GPU
ring, not the entity (aka client), they'll be completed out of order.
There's actually no need for this code at all, since we don't have
enable_signaling() and thus DMA_FENCE_SIGNALED_BIT will be set before
we could be called.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180605190302.18279-2-eric@anholt.net
Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
Eric Anholt, 7 years ago
Commit: 14d1d19086
4 changed files with 6 additions and 18 deletions
  1. drivers/gpu/drm/v3d/v3d_drv.h (+0, -1)
  2. drivers/gpu/drm/v3d/v3d_fence.c (+4, -9)
  3. drivers/gpu/drm/v3d/v3d_gem.c (+2, -5)
  4. drivers/gpu/drm/v3d/v3d_irq.c (+0, -3)
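
For reference, the reasoning in the commit message leans on how the dma-fence core checks completion: the core helper tests the signaled bit before it would ever consult a driver's .signaled hook, so once dma_fence_signal() is called from the IRQ handler the hook is redundant. Roughly paraphrased from include/linux/dma-fence.h of that era (not part of this patch):

static inline bool dma_fence_is_signaled(struct dma_fence *fence)
{
	/* Fast path: dma_fence_signal() already latched completion. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	/* Optional driver poll; only reached if the bit is not yet set. */
	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}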

+ 0 - 1
drivers/gpu/drm/v3d/v3d_drv.h

@@ -25,7 +25,6 @@ struct v3d_queue_state {
 
 	u64 fence_context;
 	u64 emit_seqno;
-	u64 finished_seqno;
 };
 
 struct v3d_dev {

+ 4 - 9
drivers/gpu/drm/v3d/v3d_fence.c

@@ -40,19 +40,14 @@ static bool v3d_fence_enable_signaling(struct dma_fence *fence)
 	return true;
 }
 
-static bool v3d_fence_signaled(struct dma_fence *fence)
-{
-	struct v3d_fence *f = to_v3d_fence(fence);
-	struct v3d_dev *v3d = to_v3d_dev(f->dev);
-
-	return v3d->queue[f->queue].finished_seqno >= f->seqno;
-}
-
 const struct dma_fence_ops v3d_fence_ops = {
	.get_driver_name = v3d_fence_get_driver_name,
	.get_timeline_name = v3d_fence_get_timeline_name,
	.enable_signaling = v3d_fence_enable_signaling,
-	.signaled = v3d_fence_signaled,
+	/* Each of our fences gets signaled as complete by the IRQ
+	 * handler, so we rely on the core's tracking of signaling.
+	 */
+	.signaled = NULL,
	.wait = dma_fence_default_wait,
	.release = dma_fence_free,
 };
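
The comment added above points at the core's own tracking of signaling. A minimal sketch of what dma_fence_signal() does with the fence, with locking, timestamps and tracing omitted; the function name is hypothetical to make clear this is a paraphrase rather than the kernel's implementation:

static void dma_fence_signal_sketch(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;

	/* Latch completion; dma_fence_is_signaled() tests this bit first. */
	set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);

	/* Run callbacks queued by waiters before the fence completed. */
	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
}

The v3d IRQ handler (see the v3d_irq.c hunk below) calls the real dma_fence_signal() for the bin and render done fences, which is what lets .signaled stay NULL.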

+ 2 - 5
drivers/gpu/drm/v3d/v3d_gem.c

@@ -654,17 +654,14 @@ void
 v3d_gem_destroy(struct drm_device *dev)
 {
	struct v3d_dev *v3d = to_v3d_dev(dev);
-	enum v3d_queue q;
 
	v3d_sched_fini(v3d);
 
	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
-	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		WARN_ON(v3d->queue[q].emit_seqno !=
-			v3d->queue[q].finished_seqno);
-	}
+	WARN_ON(v3d->bin_job);
+	WARN_ON(v3d->render_job);
 
	drm_mm_takedown(&v3d->mm);
 

+ 0 - 3
drivers/gpu/drm/v3d/v3d_irq.c

@@ -87,15 +87,12 @@ v3d_irq(int irq, void *arg)
	}
 
	if (intsts & V3D_INT_FLDONE) {
-		v3d->queue[V3D_BIN].finished_seqno++;
		dma_fence_signal(v3d->bin_job->bin.done_fence);
		status = IRQ_HANDLED;
	}
 
	if (intsts & V3D_INT_FRDONE) {
-		v3d->queue[V3D_RENDER].finished_seqno++;
		dma_fence_signal(v3d->render_job->render.done_fence);
-
		status = IRQ_HANDLED;
	}