@@ -121,7 +121,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
 	/* Kaveri kfd vmid's starts from vmid 8 */
 	allocated_vmid = bit + KFD_VMID_START_OFFSET;
 
-	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+	pr_debug("vmid allocation %d\n", allocated_vmid);
 
 	qpd->vmid = allocated_vmid;
 	q->properties.vmid = allocated_vmid;
@@ -154,13 +154,12 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
 	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
 
-	pr_debug("kfd: In func %s\n", __func__);
 	print_queue(q);
 
 	mutex_lock(&dqm->lock);
 
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 				dqm->total_queue_count);
 		mutex_unlock(&dqm->lock);
 		return -EPERM;
@@ -240,8 +239,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 	if (!set)
 		return -EBUSY;
 
-	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
-			__func__, q->pipe, q->queue);
+	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
 	/* horizontal hqd allocation */
 	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
 
@@ -278,9 +276,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 		return retval;
 	}
 
-	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
-			q->pipe,
-			q->queue);
+	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+			q->pipe, q->queue);
 
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
 			q->queue, (uint32_t __user *) q->properties.write_ptr);
@@ -304,8 +301,6 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 
 	retval = 0;
 
-	pr_debug("kfd: In Func %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 
 	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -324,7 +319,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 		dqm->sdma_queue_count--;
 		deallocate_sdma_queue(dqm, q->sdma_id);
 	} else {
-		pr_debug("q->properties.type is invalid (%d)\n",
+		pr_debug("q->properties.type %d is invalid\n",
 				q->properties.type);
 		retval = -EINVAL;
 		goto out;
@@ -403,13 +398,13 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
 
 	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
 
-	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+	pr_debug("mqd type %d\n", type);
 
 	mqd = dqm->mqds[type];
 	if (!mqd) {
 		mqd = mqd_manager_init(type, dqm->dev);
 		if (mqd == NULL)
-			pr_err("kfd: mqd manager is NULL");
+			pr_err("mqd manager is NULL");
 		dqm->mqds[type] = mqd;
 	}
 
@@ -424,8 +419,6 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
 
 	BUG_ON(!dqm || !qpd);
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
 	if (!n)
 		return -ENOMEM;
@@ -452,8 +445,6 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
 
 	BUG_ON(!dqm || !qpd);
 
-	pr_debug("In func %s\n", __func__);
-
 	pr_debug("qpd->queues_list is %s\n",
 			list_empty(&qpd->queues_list) ? "empty" : "not empty");
 
@@ -501,25 +492,13 @@ static void init_interrupts(struct device_queue_manager *dqm)
 			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
 }
 
-static int init_scheduler(struct device_queue_manager *dqm)
-{
-	int retval = 0;
-
-	BUG_ON(!dqm);
-
-	pr_debug("kfd: In %s\n", __func__);
-
-	return retval;
-}
-
 static int initialize_nocpsch(struct device_queue_manager *dqm)
 {
 	int pipe, queue;
 
 	BUG_ON(!dqm);
 
-	pr_debug("kfd: In func %s num of pipes: %d\n",
-		__func__, get_pipes_per_mec(dqm));
+	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -544,7 +523,6 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
 	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
 
-	init_scheduler(dqm);
 	return 0;
 }
 
@@ -617,9 +595,9 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
 	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
 
-	pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
-	pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
-	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);
+	pr_debug("SDMA id is: %d\n", q->sdma_id);
+	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
+	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
 
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
@@ -651,8 +629,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 
 	BUG_ON(!dqm);
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
 	res.vmid_mask <<= KFD_VMID_START_OFFSET;
 
@@ -682,9 +658,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 	res.gws_mask = res.oac_mask = res.gds_heap_base =
 			res.gds_heap_size = 0;
 
-	pr_debug("kfd: scheduling resources:\n"
-			"      vmid mask: 0x%8X\n"
-			"      queue mask: 0x%8llX\n",
+	pr_debug("Scheduling resources:\n"
+			"vmid mask: 0x%8X\n"
+			"queue mask: 0x%8llX\n",
 			res.vmid_mask, res.queue_mask);
 
 	return pm_send_set_resources(&dqm->packets, &res);
@@ -696,8 +672,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
 	BUG_ON(!dqm);
 
-	pr_debug("kfd: In func %s num of pipes: %d\n",
-		__func__, get_pipes_per_mec(dqm));
+	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -732,7 +707,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	if (retval != 0)
 		goto fail_set_sched_resources;
 
-	pr_debug("kfd: allocating fence memory\n");
+	pr_debug("Allocating fence memory\n");
 
 	/* allocate fence memory on the gart */
 	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
@@ -786,11 +761,9 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	BUG_ON(!dqm || !kq || !qpd);
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+		pr_warn("Can't create new kernel queue because %d queues were already created\n",
 			dqm->total_queue_count);
 		mutex_unlock(&dqm->lock);
 		return -EPERM;
@@ -819,8 +792,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	BUG_ON(!dqm || !kq);
 
-	pr_debug("kfd: In %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 	/* here we actually preempt the DIQ */
 	destroy_queues_cpsch(dqm, true, false);
@@ -862,7 +833,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	mutex_lock(&dqm->lock);
 
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 				dqm->total_queue_count);
 		retval = -EPERM;
 		goto out;
@@ -916,7 +887,7 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 
 	while (*fence_addr != fence_value) {
 		if (time_after(jiffies, timeout)) {
-			pr_err("kfd: qcm fence wait loop timeout expired\n");
+			pr_err("qcm fence wait loop timeout expired\n");
 			return -ETIME;
 		}
 		schedule();
@@ -949,7 +920,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->active_runlist)
 		goto out;
 
-	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+	pr_debug("Before destroying queues, sdma queue count is : %u\n",
 		dqm->sdma_queue_count);
 
 	if (dqm->sdma_queue_count > 0) {
@@ -998,7 +969,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 
 	retval = destroy_queues_cpsch(dqm, false, false);
 	if (retval != 0) {
-		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
 		goto out;
 	}
 
@@ -1014,7 +985,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 
 	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
 	if (retval != 0) {
-		pr_err("kfd: failed to execute runlist");
+		pr_err("failed to execute runlist");
 		goto out;
 	}
 	dqm->active_runlist = true;
@@ -1106,8 +1077,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 {
 	bool retval;
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 
 	if (alternate_aperture_size == 0) {
@@ -1152,7 +1121,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
 		program_sh_mem_settings(dqm, qpd);
 
-	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
 		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
 		qpd->sh_mem_ape1_limit);
 
@@ -1170,7 +1139,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 
 	BUG_ON(!dev);
 
-	pr_debug("kfd: loading device queue manager\n");
+	pr_debug("Loading device queue manager\n");
 
 	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
 	if (!dqm)
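
Note on the prefix removals above: pr_debug()/pr_warn()/pr_err() pass their
format string through the kernel's pr_fmt() macro, so a single per-driver
define can restore the "kfd:" tag for every message in one place. A minimal
sketch of that mechanism follows; the exact define and its location in the
amdkfd headers are an assumption here, not taken from this patch:

	/* Must be defined before the first include of <linux/printk.h>,
	 * which only supplies a default pr_fmt() when none exists yet.
	 */
	#define pr_fmt(fmt) "kfd: " fmt

	#include <linux/printk.h>

	static void example(int vmid)
	{
		/* Prints "kfd: vmid allocation 8" at debug level. */
		pr_debug("vmid allocation %d\n", vmid);
	}

The deleted "In func %s" breadcrumbs are likewise recoverable at runtime on
kernels built with CONFIG_DYNAMIC_DEBUG, where enabling the "f" decorator
flag on a pr_debug() call site makes it print the enclosing function name.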