@@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	struct mtk_drm_private *private = drm->dev_private;
 	struct platform_device *pdev;
 	struct device_node *np;
+	struct device *dma_dev;
 	int ret;
 
 	if (!iommu_present(&platform_bus_type))
@@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 		goto err_component_unbind;
 	}
 
-	private->dma_dev = &pdev->dev;
+	dma_dev = &pdev->dev;
+	private->dma_dev = dma_dev;
+
+	/*
+	 * Configure the DMA segment size to make sure we get contiguous IOVA
+	 * when importing PRIME buffers.
+	 */
+	if (!dma_dev->dma_parms) {
+		private->dma_parms_allocated = true;
+		dma_dev->dma_parms =
+			devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+				     GFP_KERNEL);
+	}
+	if (!dma_dev->dma_parms) {
+		ret = -ENOMEM;
+		goto err_component_unbind;
+	}
+
+	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dma_dev, "Failed to set DMA segment size\n");
+		goto err_unset_dma_parms;
+	}
 
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	drm->irq_enabled = true;
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)
-		goto err_component_unbind;
+		goto err_unset_dma_parms;
 
 	drm_kms_helper_poll_init(drm);
 	drm_mode_config_reset(drm);
 
 	return 0;
 
+err_unset_dma_parms:
+	if (private->dma_parms_allocated)
+		dma_dev->dma_parms = NULL;
 err_component_unbind:
 	component_unbind_all(drm->dev, drm);
 err_config_cleanup:
@@ -309,9 +335,14 @@ err_config_cleanup:
 
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
+	struct mtk_drm_private *private = drm->dev_private;
+
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
 
+	if (private->dma_parms_allocated)
+		private->dma_dev->dma_parms = NULL;
+
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
 }
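
Note on the pattern above (not part of the patch): dma_set_max_seg_size()
only records the limit in dev->dma_parms and fails when that structure is
missing, which is why the patch allocates it on demand. The allocation is
devm-managed against drm->dev rather than dma_dev, so it is freed when the
DRM device unbinds; the dma_parms_allocated flag and the explicit NULL
stores on the error and teardown paths keep dma_dev from being left with a
dangling pointer. Below is a minimal sketch of the same bootstrap, managing
a single device for simplicity; foo_setup_dma() is a hypothetical helper,
while devm_kzalloc(), dma_set_max_seg_size(), and DMA_BIT_MASK() are the
real kernel APIs used in the patch:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int foo_setup_dma(struct device *dev)
	{
		/*
		 * dma_set_max_seg_size() stores into dev->dma_parms and
		 * returns an error if it is missing, so allocate it first.
		 * devm ties the allocation to this device's lifetime, so
		 * no manual cleanup is needed in this simplified case.
		 */
		if (!dev->dma_parms) {
			dev->dma_parms = devm_kzalloc(dev,
						      sizeof(*dev->dma_parms),
						      GFP_KERNEL);
			if (!dev->dma_parms)
				return -ENOMEM;
		}

		/*
		 * DMA_BIT_MASK(32) is 0xffffffff, i.e. effectively no
		 * segment size cap, so an imported PRIME scatterlist can
		 * be mapped as one contiguous IOVA range.
		 */
		return dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));
	}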