@@ -30,27 +30,27 @@
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
+	spin_lock_init(&rq->lock);
 	INIT_LIST_HEAD(&rq->entities);
-	mutex_init(&rq->lock);
 	rq->current_entity = NULL;
 }
 
 static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
 				    struct amd_sched_entity *entity)
 {
-	mutex_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	list_add_tail(&entity->list, &rq->entities);
-	mutex_unlock(&rq->lock);
+	spin_unlock(&rq->lock);
 }
 
 static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 				       struct amd_sched_entity *entity)
 {
-	mutex_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
-	mutex_unlock(&rq->lock);
+	spin_unlock(&rq->lock);
 }
 
 /**
@@ -61,12 +61,16 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 static struct amd_sched_entity *
 amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
-	struct amd_sched_entity *entity = rq->current_entity;
+	struct amd_sched_entity *entity;
 
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
 			if (!kfifo_is_empty(&entity->job_queue)) {
 				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
 				return rq->current_entity;
 			}
 		}
@@ -76,6 +80,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 
 		if (!kfifo_is_empty(&entity->job_queue)) {
 			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
 			return rq->current_entity;
 		}
 
@@ -83,6 +88,8 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 			break;
 	}
 
+	spin_unlock(&rq->lock);
+
 	return NULL;
 }
 
@@ -108,22 +115,6 @@ static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
 	return full;
 }
 
-/**
- * Select next entity from the kernel run queue, if not available,
- * return null.
-*/
-static struct amd_sched_entity *
-kernel_rq_select_context(struct amd_gpu_scheduler *sched)
-{
-	struct amd_sched_entity *sched_entity;
-	struct amd_sched_rq *rq = &sched->kernel_rq;
-
-	mutex_lock(&rq->lock);
-	sched_entity = amd_sched_rq_select_entity(rq);
-	mutex_unlock(&rq->lock);
-	return sched_entity;
-}
-
 /**
  * Select next entity containing real IB submissions
 */
@@ -132,21 +123,15 @@ select_context(struct amd_gpu_scheduler *sched)
 {
 	struct amd_sched_entity *wake_entity = NULL;
 	struct amd_sched_entity *tmp;
-	struct amd_sched_rq *rq;
 
 	if (!is_scheduler_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	tmp = kernel_rq_select_context(sched);
-	if (tmp != NULL)
-		goto exit;
-
-	rq = &sched->sched_rq;
-	mutex_lock(&rq->lock);
-	tmp = amd_sched_rq_select_entity(rq);
-	mutex_unlock(&rq->lock);
-exit:
+	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (tmp == NULL)
+		tmp = amd_sched_rq_select_entity(&sched->sched_rq);
+
 	if (sched->current_entity && (sched->current_entity != tmp))
 		wake_entity = sched->current_entity;
 	sched->current_entity = tmp;