@@ -163,7 +163,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
 	num_queues = 0;
 	list_for_each_entry(cur, &qpd->queues_list, list)
 		num_queues++;
-	packet->bitfields10.num_queues = num_queues;
+	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;
 
 	packet->sh_mem_config = qpd->sh_mem_config;
 	packet->sh_mem_bases = qpd->sh_mem_bases;
@@ -177,9 +177,10 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
-				struct queue *q)
+				struct queue *q, bool is_static)
 {
 	struct pm4_map_queues *packet;
+	bool use_static = is_static;
 
 	BUG_ON(!pm || !buffer || !q);
 
@@ -209,6 +210,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 	case KFD_QUEUE_TYPE_SDMA:
 		packet->bitfields2.engine_sel =
 				engine_sel__mes_map_queues__sdma0;
+		use_static = false; /* no static queues under SDMA */
 		break;
 	default:
 		BUG();
@@ -218,6 +220,9 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 	packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
 			q->properties.doorbell_off;
 
+	packet->mes_map_queues_ordinals[0].bitfields3.is_static =
+			(use_static == true) ? 1 : 0;
+
 	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
 			lower_32_bits(q->gart_mqd_addr);
 
@@ -271,9 +276,11 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			pm_release_ib(pm);
 			return -ENOMEM;
 		}
+
 		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
 		if (retval != 0)
 			return retval;
+
 		proccesses_mapped++;
 		inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
 				alloc_size_bytes);
@@ -281,23 +288,36 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
 			if (kq->queue->properties.is_active != true)
 				continue;
+
+			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
+				kq->queue->queue, qpd->is_debug);
+
 			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-							kq->queue);
+						kq->queue, qpd->is_debug);
 			if (retval != 0)
 				return retval;
-			inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
-					alloc_size_bytes);
+
+			inc_wptr(&rl_wptr,
+				sizeof(struct pm4_map_queues),
+				alloc_size_bytes);
 		}
 
 		list_for_each_entry(q, &qpd->queues_list, list) {
 			if (q->properties.is_active != true)
 				continue;
-			retval = pm_create_map_queue(pm,
-						&rl_buffer[rl_wptr], q);
+
+			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
+				q->queue, qpd->is_debug);
+
+			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
+						q, qpd->is_debug);
+
 			if (retval != 0)
 				return retval;
-			inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
-					alloc_size_bytes);
+
+			inc_wptr(&rl_wptr,
+				sizeof(struct pm4_map_queues),
+				alloc_size_bytes);
 		}
 	}
 
@@ -488,7 +508,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 
 	packet = (struct pm4_unmap_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_unmap_queues));
-
+	pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
+		mode, reset, type);
 	packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
 			sizeof(struct pm4_unmap_queues));
 	switch (type) {
@@ -529,6 +550,11 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
 		break;
+	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
+		/* in this case, we do not preempt static queues */
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
+		break;
 	default:
 		BUG();
 		break;