
drm/mediatek: set DMA max segment size

[ Upstream commit 070955558e820b9a89c570b91b1f21762f62b288 ]

This driver requires imported PRIME buffers to appear contiguously in
its IO address space. Make sure this is the case by setting the maximum
DMA segment size to a more suitable value than the default 64KB.

Signed-off-by: Alexandre Courbot <acourbot@chromium.org>
Reviewed-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: CK Hu <ck.hu@mediatek.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
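
In practice, "appear contiguously in its IO address space" means that after dma_map_sg() the imported scatterlist must collapse into a single IOVA segment; with the default 64KB per-segment limit the DMA layer is not allowed to merge entries beyond that size. A minimal sketch of such a check, assuming a hypothetical helper name and an already-imported sg_table (not part of this patch), could look like this:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper, not part of this patch: verify that an imported
 * scatterlist was merged into a single IOVA segment by the DMA layer.
 */
static int check_contiguous_import(struct device *dev, struct sg_table *sgt)
{
	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (nents <= 0)
		return -ENOMEM;

	if (nents > 1) {
		/* More than one segment: not contiguous in IO address space. */
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	return 0;
}

The patch below makes such merging possible for the display driver's DMA device by raising the per-segment limit to DMA_BIT_MASK(32) before any import takes place.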
commit b3496367d9
 drivers/gpu/drm/mediatek/mtk_drm_drv.c | 35 +++++++++++++++++++++++++++++++++--
 drivers/gpu/drm/mediatek/mtk_drm_drv.h |  2 ++
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	struct mtk_drm_private *private = drm->dev_private;
 	struct platform_device *pdev;
 	struct device_node *np;
+	struct device *dma_dev;
 	int ret;
 
 	if (!iommu_present(&platform_bus_type))
@@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 		goto err_component_unbind;
 	}
 
-	private->dma_dev = &pdev->dev;
+	dma_dev = &pdev->dev;
+	private->dma_dev = dma_dev;
+
+	/*
+	 * Configure the DMA segment size to make sure we get contiguous IOVA
+	 * when importing PRIME buffers.
+	 */
+	if (!dma_dev->dma_parms) {
+		private->dma_parms_allocated = true;
+		dma_dev->dma_parms =
+			devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+				     GFP_KERNEL);
+	}
+	if (!dma_dev->dma_parms) {
+		ret = -ENOMEM;
+		goto err_component_unbind;
+	}
+
+	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dma_dev, "Failed to set DMA segment size\n");
+		goto err_unset_dma_parms;
+	}
 
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	drm->irq_enabled = true;
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)
-		goto err_component_unbind;
+		goto err_unset_dma_parms;
 
 	drm_kms_helper_poll_init(drm);
 	drm_mode_config_reset(drm);
 
 	return 0;
 
+err_unset_dma_parms:
+	if (private->dma_parms_allocated)
+		dma_dev->dma_parms = NULL;
 err_component_unbind:
 	component_unbind_all(drm->dev, drm);
 err_config_cleanup:
@@ -309,9 +335,14 @@ err_config_cleanup:
 
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
+	struct mtk_drm_private *private = drm->dev_private;
+
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
 
+	if (private->dma_parms_allocated)
+		private->dma_dev->dma_parms = NULL;
+
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
 }

diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -59,6 +59,8 @@ struct mtk_drm_private {
 	} commit;
 
 	struct drm_atomic_state *suspend_state;
+
+	bool dma_parms_allocated;
 };
 
 extern struct platform_driver mtk_ddp_driver;
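
As a quick sanity check (not part of the patch; the helper name is illustrative), the limit raised by the dma_set_max_seg_size() call above can be read back with dma_get_max_seg_size(), which otherwise reports the 64KB default:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative only: log the per-segment DMA limit currently in effect. */
static void report_max_seg_size(struct device *dma_dev)
{
	dev_info(dma_dev, "max DMA segment size: %u bytes\n",
		 dma_get_max_seg_size(dma_dev));
}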