@@ -1116,6 +1116,26 @@ out:
 	return retval;
 }
 
+static int set_trap_handler(struct device_queue_manager *dqm,
+				struct qcm_process_device *qpd,
+				uint64_t tba_addr,
+				uint64_t tma_addr)
+{
+	uint64_t *tma;
+
+	if (dqm->dev->cwsr_enabled) {
+		/* Jump from CWSR trap handler to user trap */
+		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+		tma[0] = tba_addr;
+		tma[1] = tma_addr;
+	} else {
+		qpd->tba_addr = tba_addr;
+		qpd->tma_addr = tma_addr;
+	}
+
+	return 0;
+}
+
 static int process_termination_nocpsch(struct device_queue_manager *dqm,
 		struct qcm_process_device *qpd)
 {
@@ -1247,6 +1267,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
 		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
 		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+		dqm->ops.set_trap_handler = set_trap_handler;
 		dqm->ops.process_termination = process_termination_cpsch;
 		break;
 	case KFD_SCHED_POLICY_NO_HWS:
@@ -1262,6 +1283,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		dqm->ops.initialize = initialize_nocpsch;
 		dqm->ops.uninitialize = uninitialize;
 		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+		dqm->ops.set_trap_handler = set_trap_handler;
 		dqm->ops.process_termination = process_termination_nocpsch;
 		break;
 	default:
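
For reference, a caller would reach the new hook through the dqm ops table,
which both scheduling policies now populate. The sketch below is illustrative
only and not part of this patch: the wrapper name example_set_trap_handler is
hypothetical, though struct kfd_process_device, pdd->dev->dqm and pdd->qpd do
exist in the KFD driver:

	/* Hypothetical caller sketch, not part of this patch: forward the
	 * user-supplied trap handler addresses (TBA/TMA) through the device
	 * queue manager ops table for the process's per-device data.
	 */
	static int example_set_trap_handler(struct kfd_process_device *pdd,
					    uint64_t tba_addr,
					    uint64_t tma_addr)
	{
		struct device_queue_manager *dqm = pdd->dev->dqm;

		return dqm->ops.set_trap_handler(dqm, &pdd->qpd,
						 tba_addr, tma_addr);
	}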