drm/amdkfd: Reorganize kfd resume code

The idea is to let the kfd init and resume functions share the same code path
as much as possible, rather than maintaining two copies of almost identical
code. This improves code readability and maintainability.

Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Yong Zhao, 8 years ago
commit b8935a7c4b

1 changed file with 40 additions and 38 deletions:
  1. drivers/gpu/drm/amd/amdkfd/kfd_device.c (+40 -38)

@@ -92,6 +92,8 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 				unsigned int chunk_size);
 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
 
+static int kfd_resume(struct kfd_dev *kfd);
+
 static const struct kfd_device_info *lookup_device_info(unsigned short did)
 {
 	size_t i;
@@ -169,15 +171,8 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 			(unsigned int)(1 << kfd->device_info->max_pasid_bits),
 			iommu_info.max_pasids);
 
-	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
-	if (err < 0) {
-		dev_err(kfd_device, "error initializing iommu device\n");
-		return false;
-	}
-
 	if (!kfd_set_pasid_limit(pasid_limit)) {
 		dev_err(kfd_device, "error setting pasid limit\n");
-		amd_iommu_free_device(kfd->pdev);
 		return false;
 	}
 
@@ -273,29 +268,22 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto kfd_interrupt_error;
 	}
 
-	if (!device_iommu_pasid_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing iommuv2 for device %x:%x\n",
-			kfd->pdev->vendor, kfd->pdev->device);
-		goto device_iommu_pasid_error;
-	}
-	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
-						iommu_pasid_shutdown_callback);
-	amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-
 	kfd->dqm = device_queue_manager_init(kfd);
 	if (!kfd->dqm) {
 		dev_err(kfd_device, "Error initializing queue manager\n");
 		goto device_queue_manager_error;
 	}
 
-	if (kfd->dqm->ops.start(kfd->dqm)) {
+	if (!device_iommu_pasid_init(kfd)) {
 		dev_err(kfd_device,
-			"Error starting queue manager for device %x:%x\n",
+			"Error initializing iommuv2 for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
-		goto dqm_start_error;
+		goto device_iommu_pasid_error;
 	}
 
+	if (kfd_resume(kfd))
+		goto kfd_resume_error;
+
 	kfd->dbgmgr = NULL;
 
 	kfd->init_complete = true;
@@ -307,11 +295,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	goto out;
 
-dqm_start_error:
+kfd_resume_error:
+device_iommu_pasid_error:
 	device_queue_manager_uninit(kfd->dqm);
 device_queue_manager_error:
-	amd_iommu_free_device(kfd->pdev);
-device_iommu_pasid_error:
 	kfd_interrupt_exit(kfd);
 kfd_interrupt_error:
 	kfd_topology_remove_device(kfd);
@@ -331,8 +318,8 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 	if (kfd->init_complete) {
+		kgd2kfd_suspend(kfd);
 		device_queue_manager_uninit(kfd->dqm);
-		amd_iommu_free_device(kfd->pdev);
 		kfd_interrupt_exit(kfd);
 		kfd_topology_remove_device(kfd);
 		kfd_doorbell_fini(kfd);
@@ -355,25 +342,40 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
 
 int kgd2kfd_resume(struct kfd_dev *kfd)
 {
-	unsigned int pasid_limit;
-	int err;
+	if (!kfd->init_complete)
+		return 0;
 
-	pasid_limit = kfd_get_pasid_limit();
+	return kfd_resume(kfd);
 
-	if (kfd->init_complete) {
-		err = amd_iommu_init_device(kfd->pdev, pasid_limit);
-		if (err < 0) {
-			dev_err(kfd_device, "failed to initialize iommu\n");
-			return -ENXIO;
-		}
+}
 
-		amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
-						iommu_pasid_shutdown_callback);
-		amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-		kfd->dqm->ops.start(kfd->dqm);
+static int kfd_resume(struct kfd_dev *kfd)
+{
+	int err = 0;
+	unsigned int pasid_limit = kfd_get_pasid_limit();
+
+	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+	if (err)
+		return -ENXIO;
+	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+					iommu_pasid_shutdown_callback);
+	amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+				     iommu_invalid_ppr_cb);
+
+	err = kfd->dqm->ops.start(kfd->dqm);
+	if (err) {
+		dev_err(kfd_device,
+			"Error starting queue manager for device %x:%x\n",
+			kfd->pdev->vendor, kfd->pdev->device);
+		goto dqm_start_error;
 	}
 
-	return 0;
+	return err;
+
+dqm_start_error:
+	amd_iommu_free_device(kfd->pdev);
+
+	return err;
 }
 
 /* This is called directly from KGD at ISR. */
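
For orientation, here is a minimal standalone sketch of the call structure this
patch produces, with kgd2kfd_device_init() and kgd2kfd_resume() both funneling
into a single kfd_resume() helper. It is an illustration only, not kernel code:
the function names mirror kfd_device.c, but the simplified signatures, struct
fields, and bodies are stubs assumed for the example.

/*
 * Illustrative sketch only; not kernel code.  It models the structure the
 * patch above produces: the init path and the resume path share one
 * kfd_resume() helper instead of duplicating the IOMMU and queue-manager
 * bring-up.  Only the function names mirror kfd_device.c; the struct
 * fields, signatures, and bodies are stubs for this example.
 */
#include <stdbool.h>
#include <stdio.h>

struct kfd_dev {
	bool init_complete;
};

/* Shared bring-up path (stands in for IOMMU init + dqm->ops.start()). */
static int kfd_resume(struct kfd_dev *kfd)
{
	(void)kfd;
	return 0;		/* 0 on success, negative errno on failure */
}

/* Init runs the shared path once and only then marks init complete. */
static bool kgd2kfd_device_init(struct kfd_dev *kfd)
{
	if (kfd_resume(kfd))
		return false;
	kfd->init_complete = true;
	return true;
}

/* Resume reuses the same path rather than keeping a second copy of it. */
static int kgd2kfd_resume(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;
	return kfd_resume(kfd);
}

int main(void)
{
	struct kfd_dev dev = { .init_complete = false };

	printf("init ok: %d\n", kgd2kfd_device_init(&dev));
	printf("resume rc: %d\n", kgd2kfd_resume(&dev));
	return 0;
}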