
drm/amdgpu: use a timer for fence fallback

Using a timer has less overhead than a work item and also adds proper cleanup handling.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Christian König, 9 years ago
parent commit c2776afe74
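For context, the pattern this patch adopts is the classic kernel one-shot timer: setup_timer() binds a callback and a data word at init time, mod_timer() (re)arms the expiry, and del_timer_sync() at teardown waits for a running callback to finish, which is the "proper cleanup" the message refers to. A minimal sketch of that lifecycle, using the pre-4.15 timer API this patch targets; the my_* names are illustrative, not part of amdgpu:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_ring {
        struct timer_list fallback_timer;
        /* ... fence bookkeeping ... */
    };

    /* Timer callback: receives the data word given to setup_timer(). */
    static void my_fence_fallback(unsigned long arg)
    {
        struct my_ring *ring = (struct my_ring *)arg;

        /* check fence activity and wake any waiters here */
    }

    static void my_ring_init(struct my_ring *ring)
    {
        setup_timer(&ring->fallback_timer, my_fence_fallback,
                    (unsigned long)ring);
    }

    static void my_ring_arm_fallback(struct my_ring *ring)
    {
        /* one-shot: (re)programs the expiry, no workqueue involved */
        mod_timer(&ring->fallback_timer, jiffies + HZ / 2);
    }

    static void my_ring_fini(struct my_ring *ring)
    {
        /* waits for a running callback before the ring is freed */
        del_timer_sync(&ring->fallback_timer);
    }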

drivers/gpu/drm/amd/amdgpu/amdgpu.h  +1 -2

@@ -389,7 +389,6 @@ struct amdgpu_clock {
  * Fences.
  */
 struct amdgpu_fence_driver {
-	struct amdgpu_ring		*ring;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
-	struct delayed_work             lockup_work;
+	struct timer_list		fallback_timer;
 	wait_queue_head_t		fence_queue;
 };
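The deleted ring back-pointer existed only so the delayed-work callback could find its ring: a work callback receives just the work_struct, so the driver had to go through container_of() and then the stored pointer. A timer callback gets the data word passed at setup time directly, which makes the field redundant. A side-by-side sketch of the two recovery styles (mirroring the callbacks in the .c hunks below):

    /* Delayed work: only the work item arrives, so recovering the
     * ring needed container_of() plus the back-pointer deleted above. */
    static void lockup_cb(struct work_struct *work)
    {
        struct amdgpu_fence_driver *drv =
            container_of(work, struct amdgpu_fence_driver,
                         lockup_work.work);
        struct amdgpu_ring *ring = drv->ring;
        /* ... */
    }

    /* Timer: the ring itself is the data word, no back-pointer needed. */
    static void fallback_cb(unsigned long arg)
    {
        struct amdgpu_ring *ring = (struct amdgpu_ring *)arg;
        /* ... */
    }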
 

drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  +34 -44

@@ -84,24 +84,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 	return seq;
 }
 
-/**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
-	/*
-	 * Do not reset the timer here with mod_delayed_work,
-	 * this can livelock in an interaction with TTM delayed destroy.
-	 */
-	queue_delayed_work(system_power_efficient_wq,
-		&ring->fence_drv.lockup_work,
-		AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
 /**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
@@ -135,6 +117,19 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	return 0;
 }
 
+/**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+	mod_timer(&ring->fence_drv.fallback_timer,
+		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
 /**
  * amdgpu_fence_activity - check for fence activity
  *
@@ -201,45 +196,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
 	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
 
 	if (seq < last_emitted)
-		amdgpu_fence_schedule_check(ring);
+		amdgpu_fence_schedule_fallback(ring);
 
 	return wake;
 }
 
 /**
- * amdgpu_fence_check_lockup - check for hardware lockup
+ * amdgpu_fence_process - process a fence
  *
- * @work: delayed work item
+ * @adev: amdgpu_device pointer
+ * @ring: ring index the fence is associated with
  *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
  */
-static void amdgpu_fence_check_lockup(struct work_struct *work)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence_driver *fence_drv;
-	struct amdgpu_ring *ring;
-
-	fence_drv = container_of(work, struct amdgpu_fence_driver,
-				lockup_work.work);
-	ring = fence_drv->ring;
-
 	if (amdgpu_fence_activity(ring))
 		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
- * amdgpu_fence_process - process a fence
+ * amdgpu_fence_fallback - fallback for hardware interrupts
  *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
+ * @work: delayed work item
  *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks for fence activity.
  */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+static void amdgpu_fence_fallback(unsigned long arg)
 {
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
+	struct amdgpu_ring *ring = (void *)arg;
+
+	amdgpu_fence_process(ring);
 }
 
 /**
@@ -289,7 +277,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
 		return 0;
 
-	amdgpu_fence_schedule_check(ring);
+	amdgpu_fence_schedule_fallback(ring);
 	wait_event(ring->fence_drv.fence_queue, (
 		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
@@ -490,9 +478,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	atomic64_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;
 
-	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
-			amdgpu_fence_check_lockup);
-	ring->fence_drv.ring = ring;
+	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+		    (unsigned long)ring);
 
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
@@ -556,6 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 		r = amdgpu_fence_wait_empty(ring);
@@ -567,6 +555,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		amd_sched_fini(&ring->sched);
+		del_timer_sync(&ring->fence_drv.fallback_timer);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
@@ -750,7 +739,8 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
 	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
-	amdgpu_fence_schedule_check(ring);
+	if (!timer_pending(&ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(ring);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
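One behavioral subtlety in the hunk above: queue_delayed_work() was a no-op when the work was already queued, but mod_timer() always reprograms the expiry. Without the timer_pending() guard, a steady stream of enable_signaling calls would keep pushing the fallback deadline into the future, and the check might never fire. A minimal illustration of the difference:

    /* Unguarded: every call moves the deadline forward again, so a
     * busy ring could starve the fallback indefinitely. */
    mod_timer(&timer, jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);

    /* Guarded: arm only when idle, preserving the old "first request
     * wins" behavior of queue_delayed_work(). */
    if (!timer_pending(&timer))
        mod_timer(&timer, jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);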