@@ -44,9 +44,9 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd);
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int execute_queues_cpsch(struct device_queue_manager *dqm);
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
-                               bool static_queues_included, bool lock);
+                               bool static_queues_included);
 
 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
@@ -379,7 +379,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                dqm->queue_count--;
 
        if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
-               retval = execute_queues_cpsch(dqm, false);
+               retval = execute_queues_cpsch(dqm);
 
 out_unlock:
        mutex_unlock(&dqm->lock);
@@ -695,7 +695,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
 
        init_interrupts(dqm);
 
-       execute_queues_cpsch(dqm, true);
+       mutex_lock(&dqm->lock);
+       execute_queues_cpsch(dqm);
+       mutex_unlock(&dqm->lock);
 
        return 0;
 fail_allocate_vidmem:
@@ -707,7 +709,9 @@ fail_packet_manager_init:
 
 static int stop_cpsch(struct device_queue_manager *dqm)
 {
-       unmap_queues_cpsch(dqm, true, true);
+       mutex_lock(&dqm->lock);
+       unmap_queues_cpsch(dqm, true);
+       mutex_unlock(&dqm->lock);
 
        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
        pm_uninit(&dqm->packets);
@@ -738,7 +742,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
        list_add(&kq->list, &qpd->priv_queue_list);
        dqm->queue_count++;
        qpd->is_debug = true;
-       execute_queues_cpsch(dqm, false);
+       execute_queues_cpsch(dqm);
        mutex_unlock(&dqm->lock);
 
        return 0;
@@ -750,11 +754,11 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
        mutex_lock(&dqm->lock);
        /* here we actually preempt the DIQ */
-       unmap_queues_cpsch(dqm, true, false);
+       unmap_queues_cpsch(dqm, true);
        list_del(&kq->list);
        dqm->queue_count--;
        qpd->is_debug = false;
-       execute_queues_cpsch(dqm, false);
+       execute_queues_cpsch(dqm);
        /*
         * Unconditionally decrement this counter, regardless of the queue's
         * type.
@@ -813,7 +817,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        list_add(&q->list, &qpd->queues_list);
        if (q->properties.is_active) {
                dqm->queue_count++;
-               retval = execute_queues_cpsch(dqm, false);
+               retval = execute_queues_cpsch(dqm);
        }
 
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -857,8 +861,9 @@ static int unmap_sdma_queues(struct device_queue_manager *dqm,
                        sdma_engine);
 }
 
+/* dqm->lock mutex has to be locked before calling this function */
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
-                               bool static_queues_included, bool lock)
+                               bool static_queues_included)
 {
        int retval;
        enum kfd_unmap_queues_filter filter;
@@ -866,10 +871,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 
        retval = 0;
 
-       if (lock)
-               mutex_lock(&dqm->lock);
        if (!dqm->active_runlist)
-               goto out;
+               return retval;
 
        pr_debug("Before destroying queues, sdma queue count is : %u\n",
                dqm->sdma_queue_count);
@@ -886,7 +889,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
        retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
                        filter, 0, false, 0);
        if (retval)
-               goto out;
+               return retval;
 
        *dqm->fence_addr = KFD_FENCE_INIT;
        pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
@@ -898,50 +901,38 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
                pdd = kfd_get_process_device_data(dqm->dev,
                                kfd_get_process(current));
                pdd->reset_wavefronts = true;
-               goto out;
+               return retval;
        }
        pm_release_ib(&dqm->packets);
        dqm->active_runlist = false;
 
-out:
-       if (lock)
-               mutex_unlock(&dqm->lock);
        return retval;
 }
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+/* dqm->lock mutex has to be locked before calling this function */
+static int execute_queues_cpsch(struct device_queue_manager *dqm)
 {
        int retval;
 
-       if (lock)
-               mutex_lock(&dqm->lock);
-
-       retval = unmap_queues_cpsch(dqm, false, false);
+       retval = unmap_queues_cpsch(dqm, false);
        if (retval) {
                pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
-               goto out;
+               return retval;
        }
 
-       if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
-               retval = 0;
-               goto out;
-       }
+       if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+               return 0;
 
-       if (dqm->active_runlist) {
-               retval = 0;
-               goto out;
-       }
+       if (dqm->active_runlist)
+               return 0;
 
        retval = pm_send_runlist(&dqm->packets, &dqm->queues);
        if (retval) {
                pr_err("failed to execute runlist");
-               goto out;
+               return retval;
        }
        dqm->active_runlist = true;
 
-out:
-       if (lock)
-               mutex_unlock(&dqm->lock);
        return retval;
 }
 
@@ -984,7 +975,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
        if (q->properties.is_active)
                dqm->queue_count--;
 
-       execute_queues_cpsch(dqm, false);
+       execute_queues_cpsch(dqm);
 
        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
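
The net effect of the patch is a locking-convention change: instead of each callee optionally taking dqm->lock via a bool lock flag, the callers now hold the lock across the whole map/unmap sequence, and execute_queues_cpsch()/unmap_queues_cpsch() simply assume it is held. Below is a minimal userspace sketch of that convention using pthreads; the struct fields, execute_queues() and start_scheduling() are simplified stand-ins for illustration, not the actual driver code:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the driver's device_queue_manager. */
struct device_queue_manager {
	pthread_mutex_t lock;
	bool active_runlist;
	int queue_count;
};

/* Callee: assumes dqm->lock is already held by the caller, so every
 * early exit can be a plain return with no unlock bookkeeping. */
static int execute_queues(struct device_queue_manager *dqm)
{
	if (dqm->queue_count <= 0)
		return 0;	/* nothing to schedule */
	if (dqm->active_runlist)
		return 0;	/* runlist already active */
	dqm->active_runlist = true;
	return 0;
}

/* Caller: owns the lock for the whole critical section, mirroring
 * what start_cpsch()/stop_cpsch() do after the patch. */
static int start_scheduling(struct device_queue_manager *dqm)
{
	int ret;

	pthread_mutex_lock(&dqm->lock);
	ret = execute_queues(dqm);
	pthread_mutex_unlock(&dqm->lock);
	return ret;
}

int main(void)
{
	struct device_queue_manager dqm = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.queue_count = 1,
	};

	printf("execute: %d, runlist active: %d\n",
	       start_scheduling(&dqm), dqm.active_runlist);
	return 0;
}
```

Dropping the conditional locking also removes the need for the `out:` unwind labels, which is why every `goto out` in the hunks above becomes a direct `return`.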