浏览代码

drm/scheduler: change entities rq even earlier

Looks like for correct debugging we need to know the scheduler even
earlier. So move picking a rq for an entity into job creation.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 7 年之前
父节点
当前提交
35e160e781
共有 2 个文件被更改，包括 33 次插入和 19 次删除
  1. 32 18
      drivers/gpu/drm/scheduler/gpu_scheduler.c
  2. 1 1
      drivers/gpu/drm/scheduler/sched_fence.c

+ 32 - 18
drivers/gpu/drm/scheduler/gpu_scheduler.c

@@ -549,6 +549,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	return sched_job;
 }
 
+/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Pick the least loaded rq for load balancing, but only while the
+ * entity is idle (empty job queue and last scheduled fence signaled).
+ */
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+	struct dma_fence *fence;
+	struct drm_sched_rq *rq;
+
+	if (spsc_queue_count(&entity->job_queue) ||
+	    entity->num_rq_list <= 1)
+		return;
+
+	fence = READ_ONCE(entity->last_scheduled);
+	if (fence && !dma_fence_is_signaled(fence))
+		return;
+
+	rq = drm_sched_entity_get_free_sched(entity);
+	spin_lock(&entity->rq_lock);
+	drm_sched_rq_remove_entity(entity->rq, entity);
+	entity->rq = rq;
+	spin_unlock(&entity->rq_lock);
+}
+
 /**
  * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
@@ -564,25 +592,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
-	struct drm_sched_rq *rq = entity->rq;
 	bool first;
 
-	first = spsc_queue_count(&entity->job_queue) == 0;
-	if (first && (entity->num_rq_list > 1)) {
-		struct dma_fence *fence;
-
-		fence = READ_ONCE(entity->last_scheduled);
-		if (fence == NULL || dma_fence_is_signaled(fence)) {
-			rq = drm_sched_entity_get_free_sched(entity);
-			spin_lock(&entity->rq_lock);
-			drm_sched_rq_remove_entity(entity->rq, entity);
-			entity->rq = rq;
-			spin_unlock(&entity->rq_lock);
-		}
-	}
-
-	sched_job->sched = entity->rq->sched;
-	sched_job->s_fence->sched = entity->rq->sched;
 	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
 	WRITE_ONCE(entity->last_user, current->group_leader);
@@ -786,7 +797,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
-	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_gpu_scheduler *sched;
+
+	drm_sched_entity_select_rq(entity);
+	sched = entity->rq->sched;
 
 	job->sched = sched;
 	job->entity = entity;

+ 1 - 1
drivers/gpu/drm/scheduler/sched_fence.c

@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 		return NULL;
 
 	fence->owner = owner;
-	fence->sched = NULL;
+	fence->sched = entity->rq->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);