|
@@ -139,7 +139,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
|
|
|
entity->last_scheduled = NULL;
|
|
|
|
|
|
spin_lock_init(&entity->rq_lock);
|
|
|
- spin_lock_init(&entity->queue_lock);
|
|
|
spsc_queue_init(&entity->job_queue);
|
|
|
|
|
|
atomic_set(&entity->fence_seq, 0);
|
|
@@ -413,6 +412,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
|
|
*
|
|
|
* @sched_job The pointer to job required to submit
|
|
|
*
|
|
|
+ * Note: To guarantee that the order of insertion into the queue
|


|


|


+ * matches the job's fence sequence number, this function should
|


|


|


+ * be called together with drm_sched_job_init under a common lock.
|
|
|
+ *
|
|
|
* Returns 0 for success, negative error code otherwise.
|
|
|
*/
|
|
|
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
|
@@ -423,11 +426,8 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
|
|
|
|
|
trace_drm_sched_job(sched_job, entity);
|
|
|
|
|
|
- spin_lock(&entity->queue_lock);
|
|
|
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
|
|
|
|
|
|
- spin_unlock(&entity->queue_lock);
|
|
|
-
|
|
|
/* first job wakes up scheduler */
|
|
|
if (first) {
|
|
|
/* Add the entity to the run queue */
|
|
@@ -593,7 +593,12 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
|
|
|
}
|
|
|
EXPORT_SYMBOL(drm_sched_job_recovery);
|
|
|
|
|
|
-/* init a sched_job with basic field */
|
|
|
+/**
|
|
|
+ * Init a sched_job with basic fields
|
|
|
+ *
|
|
|
+ * Note: Refer to drm_sched_entity_push_job documentation
|
|
|
+ * for locking considerations.
|
|
|
+ */
|
|
|
int drm_sched_job_init(struct drm_sched_job *job,
|
|
|
struct drm_gpu_scheduler *sched,
|
|
|
struct drm_sched_entity *entity,
|