@@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	return 0;
 }
 
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+	u32 pmc, ppc;
+
+	/* enable clock gating */
+	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+	if (gpu->identity.revision == 0x4301 ||
+	    gpu->identity.revision == 0x4302)
+		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+	/* Disable PA clock gating for GC400+ except for GC420 */
+	if (gpu->identity.model >= chipModel_GC400 &&
+	    gpu->identity.model != chipModel_GC420)
+		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+	/*
+	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+	 * present without a bug fix.
+	 */
+	if (gpu->identity.revision < 0x5000 &&
+	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+	    !(gpu->identity.minor_features1 &
+	      chipMinorFeatures1_DISABLE_PE_GATING))
+		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+	if (gpu->identity.revision < 0x5422)
+		pmc |= BIT(15); /* Unknown bit */
+
+	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
 	u16 prefetch;
@@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
 	}
 
+	/* enable module-level clock gating */
+	etnaviv_gpu_enable_mlcg(gpu);
+
 	/*
 	 * Update GPU AXI cache atttribute to "cacheable, no allocate".
 	 * This is necessary to prevent the iMX6 SoC locking up.
@@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	bool mmuv2;
 
 	ret = pm_runtime_get_sync(gpu->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
 		return ret;
+	}
 
 	etnaviv_hw_identify(gpu);
 
@@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	}
 
 	ret = etnaviv_hw_reset(gpu);
-	if (ret)
+	if (ret) {
+		dev_err(gpu->dev, "GPU reset failed\n");
 		goto fail;
+	}
 
 	/* Setup IOMMU.. eventually we will (I think) do this once per context
 	 * and have separate page tables per context. For now, to keep things
@@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	}
 
 	if (!iommu) {
+		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
 		ret = -ENOMEM;
 		goto fail;
	}
 
 	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
 	if (!gpu->mmu) {
+		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
 		iommu_domain_free(iommu);
 		ret = -ENOMEM;
 		goto fail;