
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued

Chris Wilson needs the new drm_driver->release callback to make sure
the shiny new dma-buf testcases don't oops the driver on unload.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
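
For context, the new drm_driver->release callback mentioned above lets a driver
defer its final per-device teardown until the last reference to the drm_device
is dropped, e.g. when an exported dma-buf outlives the module unload. The
following is only a minimal sketch of the intended usage, not code from this
merge; all mydrv_* names are hypothetical.

	#include <linux/slab.h>
	#include <drm/drm_drv.h>

	/* Runs once the last drm_dev_unref() has dropped the final reference;
	 * the underlying PCI/platform device may already be unbound, so only
	 * software state may be touched here. */
	static void mydrv_release(struct drm_device *dev)
	{
		struct mydrv_private *priv = dev->dev_private;

		kfree(priv);
	}

	static struct drm_driver mydrv_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		.release         = mydrv_release,
		/* fops, ioctls and GEM hooks omitted */
	};
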
Daniel Vetter, 8 years ago
parent
commit
51a831a772
100 changed files with 2917 additions and 1395 deletions
  1. 35 0
      Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
  2. 12 26
      Documentation/devicetree/bindings/display/msm/gpu.txt
  3. 11 4
      Documentation/gpu/drm-mm.rst
  4. 1 0
      MAINTAINERS
  5. 1 0
      drivers/gpu/drm/amd/amdgpu/amdgpu.h
  6. 27 6
      drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
  7. 1 2
      drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
  8. 0 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
  9. 18 36
      drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
  10. 7 9
      drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
  11. 12 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
  12. 11 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
  13. 9 0
      drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
  14. 9 0
      drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
  15. 0 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
  16. 8 12
      drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
  17. 14 15
      drivers/gpu/drm/amd/amdgpu/ci_dpm.c
  18. 2 2
      drivers/gpu/drm/amd/amdgpu/cik.c
  19. 87 129
      drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
  20. 15 53
      drivers/gpu/drm/amd/amdgpu/kv_dpm.c
  21. 73 17
      drivers/gpu/drm/amd/amdgpu/si.c
  22. 2 2
      drivers/gpu/drm/amd/amdgpu/si_enums.h
  23. 86 42
      drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
  24. 10 14
      drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
  25. 8 13
      drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
  26. 234 217
      drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
  27. 6 11
      drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
  28. 1 0
      drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
  29. 29 45
      drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
  30. 13 10
      drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
  31. 10 12
      drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
  32. 2 0
      drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
  33. 1 2
      drivers/gpu/drm/arc/arcpgu_drv.c
  34. 1 1
      drivers/gpu/drm/arm/hdlcd_drv.c
  35. 1 1
      drivers/gpu/drm/arm/malidp_drv.c
  36. 1 1
      drivers/gpu/drm/armada/armada_fbdev.c
  37. 2 2
      drivers/gpu/drm/armada/armada_gem.c
  38. 1 2
      drivers/gpu/drm/ast/ast_fb.c
  39. 0 1
      drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
  40. 10 0
      drivers/gpu/drm/bochs/bochs_drv.c
  41. 1 2
      drivers/gpu/drm/bochs/bochs_fbdev.c
  42. 815 134
      drivers/gpu/drm/bridge/sil-sii8620.c
  43. 34 16
      drivers/gpu/drm/bridge/sil-sii8620.h
  44. 1 1
      drivers/gpu/drm/cirrus/cirrus_fbdev.c
  45. 2 3
      drivers/gpu/drm/drm_atomic.c
  46. 1 1
      drivers/gpu/drm/drm_atomic_helper.c
  47. 24 0
      drivers/gpu/drm/drm_color_mgmt.c
  48. 3 0
      drivers/gpu/drm/drm_crtc_internal.h
  49. 45 20
      drivers/gpu/drm/drm_drv.c
  50. 5 3
      drivers/gpu/drm/drm_edid.c
  51. 7 8
      drivers/gpu/drm/drm_fb_cma_helper.c
  52. 8 5
      drivers/gpu/drm/drm_fb_helper.c
  53. 272 216
      drivers/gpu/drm/drm_mm.c
  54. 2 6
      drivers/gpu/drm/drm_modes.c
  55. 1 2
      drivers/gpu/drm/drm_vma_manager.c
  56. 1 0
      drivers/gpu/drm/etnaviv/Makefile
  57. 8 6
      drivers/gpu/drm/etnaviv/etnaviv_buffer.c
  58. 6 0
      drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
  59. 153 0
      drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
  60. 58 0
      drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
  61. 3 2
      drivers/gpu/drm/etnaviv/etnaviv_drv.c
  62. 3 3
      drivers/gpu/drm/etnaviv/etnaviv_dump.c
  63. 5 3
      drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
  64. 54 41
      drivers/gpu/drm/etnaviv/etnaviv_gpu.c
  65. 2 26
      drivers/gpu/drm/etnaviv/etnaviv_gpu.h
  66. 1 1
      drivers/gpu/drm/etnaviv/etnaviv_iommu.c
  67. 4 2
      drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
  68. 31 38
      drivers/gpu/drm/etnaviv/etnaviv_mmu.c
  69. 6 4
      drivers/gpu/drm/etnaviv/etnaviv_mmu.h
  70. 71 21
      drivers/gpu/drm/exynos/exynos5433_drm_decon.c
  71. 1 4
      drivers/gpu/drm/exynos/exynos_drm_fbdev.c
  72. 0 2
      drivers/gpu/drm/exynos/exynos_drm_fimd.c
  73. 1 1
      drivers/gpu/drm/exynos/exynos_drm_g2d.c
  74. 63 17
      drivers/gpu/drm/exynos/exynos_hdmi.c
  75. 1 1
      drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
  76. 7 5
      drivers/gpu/drm/fsl-dcu/fsl_tcon.c
  77. 1 1
      drivers/gpu/drm/gma500/framebuffer.c
  78. 1 2
      drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
  79. 1 2
      drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
  80. 4 6
      drivers/gpu/drm/i915/i915_gem.c
  81. 7 2
      drivers/gpu/drm/i915/i915_gem_evict.c
  82. 2 3
      drivers/gpu/drm/i915/i915_gem_execbuffer.c
  83. 16 23
      drivers/gpu/drm/i915/i915_gem_gtt.c
  84. 3 3
      drivers/gpu/drm/i915/i915_gem_stolen.c
  85. 1 2
      drivers/gpu/drm/i915/intel_fbdev.c
  86. 1 2
      drivers/gpu/drm/imx/imx-drm-core.c
  87. 3 3
      drivers/gpu/drm/meson/Makefile
  88. 1 3
      drivers/gpu/drm/meson/meson_drv.c
  89. 1 1
      drivers/gpu/drm/mgag200/mgag200_fb.c
  90. 7 0
      drivers/gpu/drm/msm/Kconfig
  91. 2 0
      drivers/gpu/drm/msm/Makefile
  92. 19 2
      drivers/gpu/drm/msm/adreno/a5xx_gpu.c
  93. 46 16
      drivers/gpu/drm/msm/adreno/adreno_device.c
  94. 0 1
      drivers/gpu/drm/msm/adreno/adreno_gpu.c
  95. 1 3
      drivers/gpu/drm/msm/adreno/adreno_gpu.h
  96. 6 12
      drivers/gpu/drm/msm/dsi/dsi.c
  97. 41 10
      drivers/gpu/drm/msm/dsi/dsi.h
  98. 256 13
      drivers/gpu/drm/msm/dsi/dsi.xml.h
  99. 25 0
      drivers/gpu/drm/msm/dsi/dsi_cfg.c
  100. 1 0
      drivers/gpu/drm/msm/dsi/dsi_cfg.h

+ 35 - 0
Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt

@@ -56,6 +56,18 @@ Required properties for V3D:
 - interrupts:	The interrupt number
 		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
 
+Required properties for DSI:
+- compatible:	Should be "brcm,bcm2835-dsi0" or "brcm,bcm2835-dsi1"
+- reg:		Physical base address and length of the DSI block's registers
+- interrupts:	The interrupt number
+		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+- clocks:	a) phy: The DSI PLL clock feeding the DSI analog PHY
+		b) escape: The DSI ESC clock from CPRMAN
+		c) pixel: The DSI pixel clock from CPRMAN
+- clock-output-names:
+		The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
+		dsi[01]_ddr2, and dsi[01]_ddr
+
 [1] Documentation/devicetree/bindings/media/video-interfaces.txt
 
 Example:
@@ -99,6 +111,29 @@ dpi: dpi@7e208000 {
 	};
 };
 
+dsi1: dsi@7e700000 {
+	compatible = "brcm,bcm2835-dsi1";
+	reg = <0x7e700000 0x8c>;
+	interrupts = <2 12>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	#clock-cells = <1>;
+
+	clocks = <&clocks BCM2835_PLLD_DSI1>,
+		 <&clocks BCM2835_CLOCK_DSI1E>,
+		 <&clocks BCM2835_CLOCK_DSI1P>;
+	clock-names = "phy", "escape", "pixel";
+
+	clock-output-names = "dsi1_byte", "dsi1_ddr2", "dsi1_ddr";
+
+	pitouchscreen: panel@0 {
+		compatible = "raspberrypi,touchscreen";
+		reg = <0>;
+
+		<...>
+	};
+};
+
 vec: vec@7e806000 {
 	compatible = "brcm,bcm2835-vec";
 	reg = <0x7e806000 0x1000>;

+ 12 - 26
Documentation/devicetree/bindings/display/msm/gpu.txt

@@ -1,23 +1,19 @@
 Qualcomm adreno/snapdragon GPU
 
 Required properties:
-- compatible: "qcom,adreno-3xx"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+    for example: "qcom,adreno-306.0", "qcom,adreno"
+  Note that you need to list the less specific "qcom,adreno" (since this
+  is what the device is matched on), in addition to the more specific
+  with the chip-id.
 - reg: Physical base address and length of the controller's registers.
 - interrupts: The interrupt signal from the gpu.
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
 - clock-names: the following clocks are required:
-  * "core_clk"
-  * "iface_clk"
-  * "mem_iface_clk"
-- qcom,chipid: gpu chip-id.  Note this may become optional for future
-  devices if we can reliably read the chipid from hw
-- qcom,gpu-pwrlevels: list of operating points
-  - compatible: "qcom,gpu-pwrlevels"
-  - for each qcom,gpu-pwrlevel:
-    - qcom,gpu-freq: requested gpu clock speed
-    - NOTE: downstream android driver defines additional parameters to
-      configure memory bandwidth scaling per OPP.
+  * "core"
+  * "iface"
+  * "mem_iface"
 
 Example:
 
@@ -25,28 +21,18 @@ Example:
 	...
 
 	gpu: qcom,kgsl-3d0@4300000 {
-		compatible = "qcom,adreno-3xx";
+		compatible = "qcom,adreno-320.2", "qcom,adreno";
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
-		    "core_clk",
-		    "iface_clk",
-		    "mem_iface_clk";
+		    "core",
+		    "iface",
+		    "mem_iface";
 		clocks =
 		    <&mmcc GFX3D_CLK>,
 		    <&mmcc GFX3D_AHB_CLK>,
 		    <&mmcc MMSS_IMEM_AHB_CLK>;
-		qcom,chipid = <0x03020100>;
-		qcom,gpu-pwrlevels {
-			compatible = "qcom,gpu-pwrlevels";
-			qcom,gpu-pwrlevel@0 {
-				qcom,gpu-freq = <450000000>;
-			};
-			qcom,gpu-pwrlevel@1 {
-				qcom,gpu-freq = <27000000>;
-			};
-		};
 	};
 };

+ 11 - 4
Documentation/gpu/drm-mm.rst

@@ -291,10 +291,17 @@ To use :c:func:`drm_gem_mmap()`, drivers must fill the struct
 :c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
 with a pointer to VM operations.
 
-struct vm_operations_struct \*gem_vm_ops struct
-vm_operations_struct { void (\*open)(struct vm_area_struct \* area);
-void (\*close)(struct vm_area_struct \* area); int (\*fault)(struct
-vm_area_struct \*vma, struct vm_fault \*vmf); };
+The VM operations is a :c:type:`struct vm_operations_struct <vm_operations_struct>`
+made up of several fields, the more interesting ones being:
+
+.. code-block:: c
+
+	struct vm_operations_struct {
+		void (*open)(struct vm_area_struct * area);
+		void (*close)(struct vm_area_struct * area);
+		int (*fault)(struct vm_fault *vmf);
+	};
+
 
 The open and close operations must update the GEM object reference
 count. Drivers can use the :c:func:`drm_gem_vm_open()` and

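To make the reworked gem_vm_ops documentation above concrete, here is a hedged
sketch (not part of the patch) of how a driver typically fills in these hooks:
drm_gem_vm_open()/drm_gem_vm_close() maintain the GEM object reference count,
and mydrv_gem_fault is a hypothetical fault handler using the single-argument
signature shown in the new code block.

	#include <drm/drm_drv.h>
	#include <drm/drm_gem.h>

	static int mydrv_gem_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct drm_gem_object *obj = vma->vm_private_data;

		/* Translate vmf->address into a backing page of 'obj' and
		 * insert it into the VMA here. */
		return VM_FAULT_NOPAGE;
	}

	static const struct vm_operations_struct mydrv_gem_vm_ops = {
		.open  = drm_gem_vm_open,	/* takes a GEM object reference */
		.close = drm_gem_vm_close,	/* drops that reference again */
		.fault = mydrv_gem_fault,
	};

	static struct drm_driver mydrv_driver = {
		/* ... */
		.gem_vm_ops = &mydrv_gem_vm_ops,
	};
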
+ 1 - 0
MAINTAINERS

@@ -3970,6 +3970,7 @@ S:	Maintained
 L:	linux-media@vger.kernel.org
 L:	dri-devel@lists.freedesktop.org
 F:	drivers/dma-buf/sync_*
+F:	drivers/dma-buf/dma-fence*
 F:	drivers/dma-buf/sw_sync.c
 F:	include/linux/sync_file.h
 F:	include/uapi/linux/sync_file.h

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1709,6 +1709,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);

+ 27 - 6
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -850,16 +850,37 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 				strcpy(fw_name, "amdgpu/fiji_smc.bin");
 				break;
 			case CHIP_POLARIS11:
-				if (type == CGS_UCODE_ID_SMU)
-					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
-				else if (type == CGS_UCODE_ID_SMU_SK)
+				if (type == CGS_UCODE_ID_SMU) {
+					if (((adev->pdev->device == 0x67ef) &&
+					     ((adev->pdev->revision == 0xe0) ||
+					      (adev->pdev->revision == 0xe2) ||
+					      (adev->pdev->revision == 0xe5))) ||
+					    ((adev->pdev->device == 0x67ff) &&
+					     ((adev->pdev->revision == 0xcf) ||
+					      (adev->pdev->revision == 0xef) ||
+					      (adev->pdev->revision == 0xff))))
+						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+					else
+						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+				} else if (type == CGS_UCODE_ID_SMU_SK) {
 					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+				}
 				break;
 			case CHIP_POLARIS10:
-				if (type == CGS_UCODE_ID_SMU)
-					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
-				else if (type == CGS_UCODE_ID_SMU_SK)
+				if (type == CGS_UCODE_ID_SMU) {
+					if ((adev->pdev->device == 0x67df) &&
+					    ((adev->pdev->revision == 0xe0) ||
+					     (adev->pdev->revision == 0xe3) ||
+					     (adev->pdev->revision == 0xe4) ||
+					     (adev->pdev->revision == 0xe5) ||
+					     (adev->pdev->revision == 0xe7) ||
+					     (adev->pdev->revision == 0xef)))
+						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+					else
+						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+				} else if (type == CGS_UCODE_ID_SMU_SK) {
 					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+				}
 				break;
 			case CHIP_POLARIS12:
 				strcpy(fw_name, "amdgpu/polaris12_smc.bin");

+ 1 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -344,8 +344,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
  * submission. This can result in a debt that can stop buffer migrations
  * temporarily.
  */
-static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
-					 u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
 {
 	spin_lock(&adev->mm_stats.lock);
 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);

+ 0 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -374,7 +374,6 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 			&amdgpu_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
-				 adev->mode_info.num_crtc,
 				 AMDGPUFB_CONN_LIMIT);
 	if (ret) {
 		kfree(rfbdev);

+ 18 - 36
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -487,67 +487,50 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
  *
  *
  * @adev: amdgpu_device pointer
  * @adev: amdgpu_device pointer
  * @bo_va: bo_va to update
  * @bo_va: bo_va to update
+ * @list: validation list
+ * @operation: map or unmap
  *
  *
- * Update the bo_va directly after setting it's address. Errors are not
+ * Update the bo_va directly after setting its address. Errors are not
  * vital here, so they are not reported back to userspace.
  * vital here, so they are not reported back to userspace.
  */
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct amdgpu_bo_va *bo_va,
 				    struct amdgpu_bo_va *bo_va,
+				    struct list_head *list,
 				    uint32_t operation)
 				    uint32_t operation)
 {
 {
-	struct ttm_validate_buffer tv, *entry;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
-	list_add(&tv.head, &list);
+	struct ttm_validate_buffer *entry;
+	int r = -ERESTARTSYS;
 
 
-	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
-
-	/* Provide duplicates to avoid -EALREADY */
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_print;
-
-	list_for_each_entry(entry, &list, head) {
+	list_for_each_entry(entry, list, head) {
 		struct amdgpu_bo *bo =
 		struct amdgpu_bo *bo =
 			container_of(entry->bo, struct amdgpu_bo, tbo);
 			container_of(entry->bo, struct amdgpu_bo, tbo);
 
 
 		/* if anything is swapped out don't swap it in here,
 		/* if anything is swapped out don't swap it in here,
 		   just abort and wait for the next CS */
 		   just abort and wait for the next CS */
 		if (!amdgpu_bo_gpu_accessible(bo))
 		if (!amdgpu_bo_gpu_accessible(bo))
-			goto error_unreserve;
+			goto error;
 
 
 		if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
 		if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-			goto error_unreserve;
+			goto error;
 	}
 	}
 
 
 	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
 	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
 				      NULL);
 				      NULL);
 	if (r)
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 
 	if (operation == AMDGPU_VA_OP_MAP)
 	if (operation == AMDGPU_VA_OP_MAP)
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
 
-error_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_print:
+error:
 	if (r && r != -ERESTARTSYS)
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 }
@@ -564,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ttm_validate_buffer tv;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct list_head list;
 	uint32_t invalid_flags, va_flags = 0;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 	int r = 0;
 
 
@@ -602,14 +585,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 		return -ENOENT;
 	abo = gem_to_amdgpu_bo(gobj);
 	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	tv.bo = &abo->tbo;
 	tv.bo = &abo->tbo;
-	tv.shared = true;
+	tv.shared = false;
 	list_add(&tv.head, &list);
 	list_add(&tv.head, &list);
 
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 		return r;
@@ -640,10 +622,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 	default:
 		break;
 		break;
 	}
 	}
-	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
 	    !amdgpu_vm_debug)
 	    !amdgpu_vm_debug)
-		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+		amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+	ttm_eu_backoff_reservation(&ticket, &list);
 
 
 	drm_gem_object_unreference_unlocked(gobj);
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 	return r;

+ 7 - 9
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

@@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct drm_mm_node *node = mem->mm_node;
 	struct drm_mm_node *node = mem->mm_node;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	unsigned long fpfn, lpfn;
 	int r;
 	int r;
 
 
@@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	else
 	else
 		lpfn = man->size;
 		lpfn = man->size;
 
 
-	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 
 
 	spin_lock(&mgr->lock);
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
-						mem->page_alignment, 0,
-						fpfn, lpfn, sflags, aflags);
+	r = drm_mm_insert_node_in_range(&mgr->mm, node,
+					mem->num_pages, mem->page_alignment, 0,
+					fpfn, lpfn, mode);
 	spin_unlock(&mgr->lock);
 	spin_unlock(&mgr->lock);
 
 
 	if (!r) {
 	if (!r) {

+ 12 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
 	enum ttm_bo_type type;
 	unsigned long page_align;
 	unsigned long page_align;
+	u64 initial_bytes_moved;
 	size_t acc_size;
 	size_t acc_size;
 	int r;
 	int r;
 
 
@@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 	 */
 	 */
 
 
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
 	 thanks to write-combining
+#endif
 
 
 	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
 		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
 		WARN_ON(!locked);
 		WARN_ON(!locked);
 	}
 	}
+
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
 			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
 			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
 			&amdgpu_ttm_bo_destroy);
 			&amdgpu_ttm_bo_destroy);
-	if (unlikely(r != 0))
+	amdgpu_cs_report_moved_bytes(adev,
+		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+
+	if (unlikely(r != 0)) {
+		if (!resv)
+			ww_mutex_unlock(&bo->tbo.resv->lock);
 		return r;
 		return r;
+	}
 
 
 	bo->tbo.priority = ilog2(bo->tbo.num_pages);
 	bo->tbo.priority = ilog2(bo->tbo.num_pages);
 	if (kernel)
 	if (kernel)

+ 11 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 			/* XXX select vce level based on ring/task */
 			/* XXX select vce level based on ring/task */
 			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
 			mutex_unlock(&adev->pm.mutex);
+			amdgpu_pm_compute_clocks(adev);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_UNGATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_UNGATE);
 		} else {
 		} else {
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_GATE);
 			mutex_lock(&adev->pm.mutex);
 			mutex_lock(&adev->pm.mutex);
 			adev->pm.dpm.vce_active = false;
 			adev->pm.dpm.vce_active = false;
 			mutex_unlock(&adev->pm.mutex);
 			mutex_unlock(&adev->pm.mutex);
+			amdgpu_pm_compute_clocks(adev);
 		}
 		}
-		amdgpu_pm_compute_clocks(adev);
+
 	}
 	}
 }
 }
 
 

+ 9 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_uvd(adev, false);
 			amdgpu_dpm_enable_uvd(adev, false);
 		} else {
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+			/* shutdown the UVD block */
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_CG_STATE_GATE);
 		}
 		}
 	} else {
 	} else {
 		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 			amdgpu_dpm_enable_uvd(adev, true);
 			amdgpu_dpm_enable_uvd(adev, true);
 		} else {
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
 			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_PG_STATE_UNGATE);
 		}
 		}
 	}
 	}
 }
 }

+ 9 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_vce(adev, false);
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 0, 0);
 			amdgpu_asic_set_vce_clocks(adev, 0, 0);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_CG_STATE_GATE);
 		}
 		}
 	} else {
 	} else {
 		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
 		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
 			amdgpu_dpm_enable_vce(adev, true);
 			amdgpu_dpm_enable_vce(adev, true);
 		} else {
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
 			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_PG_STATE_UNGATE);
+
 		}
 		}
 	}
 	}
 	mutex_unlock(&adev->vce.idle_mutex);
 	mutex_unlock(&adev->vce.idle_mutex);

+ 0 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
 		amdgpu_vm_bo_rmv(adev, bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
-		kfree(bo_va);
 		return r;
 	}
 

+ 8 - 12
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	struct amdgpu_vram_mgr *mgr = man->priv;
 	struct amdgpu_vram_mgr *mgr = man->priv;
 	struct drm_mm *mm = &mgr->mm;
 	struct drm_mm *mm = &mgr->mm;
 	struct drm_mm_node *nodes;
 	struct drm_mm_node *nodes;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	enum drm_mm_insert_mode mode;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
 	unsigned i;
 	unsigned i;
 	int r;
 	int r;
@@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	if (!nodes)
 	if (!nodes)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 
 
 	pages_left = mem->num_pages;
 	pages_left = mem->num_pages;
 
 
@@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 
 
 		if (pages == pages_per_node)
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
 			alignment = pages_per_node;
-		else
-			sflags |= DRM_MM_SEARCH_BEST;
 
 
-		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
-							alignment, 0,
-							place->fpfn, lpfn,
-							sflags, aflags);
+		r = drm_mm_insert_node_in_range(mm, &nodes[i],
+						pages, alignment, 0,
+						place->fpfn, lpfn,
+						mode);
 		if (unlikely(r))
 		if (unlikely(r))
 			goto error;
 			goto error;
 
 

+ 14 - 15
drivers/gpu/drm/amd/amdgpu/ci_dpm.c

@@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)
 
 
 static int ci_upload_firmware(struct amdgpu_device *adev)
 static int ci_upload_firmware(struct amdgpu_device *adev)
 {
 {
-	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 	int i, ret;
 
 
 	if (amdgpu_ci_is_smc_running(adev)) {
 	if (amdgpu_ci_is_smc_running(adev)) {
@@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	amdgpu_ci_stop_smc_clock(adev);
 	amdgpu_ci_stop_smc_clock(adev);
 	amdgpu_ci_reset_smc(adev);
 	amdgpu_ci_reset_smc(adev);
 
 
-	ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+	ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
 
 
 	return ret;
 	return ret;
 
 
@@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 
 	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
 	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
 		if (amdgpu_new_state->evclk) {
 		if (amdgpu_new_state->evclk) {
-			/* turn the clocks on when encoding */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_CG_STATE_UNGATE);
-			if (ret)
-				return ret;
-
 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
 			tmp = RREG32_SMC(ixDPM_TABLE_475);
 			tmp = RREG32_SMC(ixDPM_TABLE_475);
 			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
 			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
@@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 			ret = ci_enable_vce_dpm(adev, false);
 			ret = ci_enable_vce_dpm(adev, false);
 			if (ret)
 			if (ret)
 				return ret;
 				return ret;
-			/* turn the clocks off when not encoding */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_CG_STATE_GATE);
 		}
 		}
 	}
 	}
 	return ret;
 	return ret;
@@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle)
 	adev->pm.current_mclk = adev->clock.default_mclk;
 	adev->pm.current_mclk = adev->clock.default_mclk;
 	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 
 
-	if (amdgpu_dpm == 0)
-		return 0;
-
 	ret = ci_dpm_init_microcode(adev);
 	ret = ci_dpm_init_microcode(adev);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
+	if (amdgpu_dpm == 0)
+		return 0;
+
 	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
 	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
 	mutex_lock(&adev->pm.mutex);
 	mutex_lock(&adev->pm.mutex);
 	ret = ci_dpm_init(adev);
 	ret = ci_dpm_init(adev);
@@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)
 
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
-	if (!amdgpu_dpm)
+	if (!amdgpu_dpm) {
+		ret = ci_upload_firmware(adev);
+		if (ret) {
+			DRM_ERROR("ci_upload_firmware failed\n");
+			return ret;
+		}
+		ci_dpm_start_smc(adev);
 		return 0;
 		return 0;
+	}
 
 
 	mutex_lock(&adev->pm.mutex);
 	mutex_lock(&adev->pm.mutex);
 	ci_dpm_setup_asic(adev);
 	ci_dpm_setup_asic(adev);
@@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
 		mutex_lock(&adev->pm.mutex);
 		mutex_lock(&adev->pm.mutex);
 		ci_dpm_disable(adev);
 		ci_dpm_disable(adev);
 		mutex_unlock(&adev->pm.mutex);
 		mutex_unlock(&adev->pm.mutex);
+	} else {
+		ci_dpm_stop_smc(adev);
 	}
 	}
 
 
 	return 0;
 	return 0;

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -1722,8 +1722,8 @@ static int cik_common_early_init(void *handle)
 			  AMD_PG_SUPPORT_GFX_SMG |
 			  AMD_PG_SUPPORT_GFX_DMG |*/
 			AMD_PG_SUPPORT_UVD |
-			/*AMD_PG_SUPPORT_VCE |
-			  AMD_PG_SUPPORT_CP |
+			AMD_PG_SUPPORT_VCE |
+			/*  AMD_PG_SUPPORT_CP |
 			  AMD_PG_SUPPORT_GDS |
 			  AMD_PG_SUPPORT_RLC_SMU_HS |
 			  AMD_PG_SUPPORT_ACP |

+ 87 - 129
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c

@@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width)
 	return (u32)(((u64)1 << bit_width) - 1);
 	return (u32)(((u64)1 << bit_width) - 1);
 }
 }
 
 
-static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
-				    u32 max_rb_num_per_se,
-				    u32 sh_per_se)
+static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 {
 {
 	u32 data, mask;
 	u32 data, mask;
 
 
-	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
 
-	data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
 
-	mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+	mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/
+					adev->gfx.config.max_sh_per_se);
 
 
-	return data & mask;
+	return ~data & mask;
 }
 }
 
 
 static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
 static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
@@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 }
 }
 
 
-static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
-			      u32 se_num, u32 sh_per_se,
-			      u32 max_rb_num_per_se)
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
 {
 {
 	int i, j;
 	int i, j;
-	u32 data, mask;
-	u32 disabled_rbs = 0;
-	u32 enabled_rbs = 0;
+	u32 data;
+	u32 raster_config = 0;
+	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 	unsigned num_rb_pipes;
 	unsigned num_rb_pipes;
 
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
-			data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
-			disabled_rbs |= data << ((i * sh_per_se + j) * 2);
+			data = gfx_v6_0_get_rb_active_bitmap(adev);
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					rb_bitmap_width_per_sh);
 		}
 		}
 	}
 	}
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	mask = 1;
-	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
-		if (!(disabled_rbs & mask))
-			enabled_rbs |= mask;
-		mask <<= 1;
-	}
 
 
-	adev->gfx.config.backend_enable_mask = enabled_rbs;
-	adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+	adev->gfx.config.backend_enable_mask = active_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 
 
 	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
 	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
 			     adev->gfx.config.max_shader_engines, 16);
 			     adev->gfx.config.max_shader_engines, 16);
 
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
-		data = 0;
-		for (j = 0; j < sh_per_se; j++) {
-			switch (enabled_rbs & 3) {
-			case 1:
-				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
-				break;
-			case 2:
-				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
-				break;
-			case 3:
-			default:
-				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
-				break;
-			}
-			enabled_rbs >>= 2;
-		}
-		gfx_v6_0_raster_config(adev, &data);
+	gfx_v6_0_raster_config(adev, &raster_config);
 
 
-		if (!adev->gfx.config.backend_enable_mask ||
-				adev->gfx.config.num_rbs >= num_rb_pipes)
-			WREG32(mmPA_SC_RASTER_CONFIG, data);
-		else
-			gfx_v6_0_write_harvested_raster_configs(adev, data,
-								adev->gfx.config.backend_enable_mask,
-								num_rb_pipes);
+	if (!adev->gfx.config.backend_enable_mask ||
+			adev->gfx.config.num_rbs >= num_rb_pipes) {
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+	} else {
+		gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
+							adev->gfx.config.backend_enable_mask,
+							num_rb_pipes);
+	}
+
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+		}
 	}
 	}
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 	mutex_unlock(&adev->grbm_idx_mutex);
@@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
 }
 }
 */
 */
 
 
-static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+						 u32 bitmap)
 {
 {
-	u32 data, mask;
+	u32 data;
 
 
-	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
-	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+	if (!bitmap)
+		return;
 
 
-	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 
 
-	mask = gfx_v6_0_create_bitmask(cu_per_sh);
+	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
 
 
-	return ~data & mask;
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
+{
+	u32 data, mask;
+
+	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+
+	mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
+	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
 }
 }
 
 
 
 
-static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
-			 u32 se_num, u32 sh_per_se,
-			 u32 cu_per_sh)
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
 {
 {
 	int i, j, k;
 	int i, j, k;
 	u32 data, mask;
 	u32 data, mask;
 	u32 active_cu = 0;
 	u32 active_cu = 0;
 
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
 			data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
 			data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
-			active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+			active_cu = gfx_v6_0_get_cu_enabled(adev);
 
 
 			mask = 1;
 			mask = 1;
 			for (k = 0; k < 16; k++) {
 			for (k = 0; k < 16; k++) {
@@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 		gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
 		gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
 		break;
 		break;
 	}
 	}
+	gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
+	if (adev->gfx.config.max_shader_engines == 2)
+		gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
 	adev->gfx.config.gb_addr_config = gb_addr_config;
 	adev->gfx.config.gb_addr_config = gb_addr_config;
 
 
 	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
 	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
@@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 #endif
 #endif
 	gfx_v6_0_tiling_mode_table_init(adev);
 	gfx_v6_0_tiling_mode_table_init(adev);
 
 
-	gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
-		    adev->gfx.config.max_sh_per_se,
-		    adev->gfx.config.max_backends_per_se);
+	gfx_v6_0_setup_rb(adev);
 
 
-	gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
-		     adev->gfx.config.max_sh_per_se,
-		     adev->gfx.config.max_cu_per_sh);
+	gfx_v6_0_setup_spi(adev);
 
 
 	gfx_v6_0_get_cu_info(adev);
 	gfx_v6_0_get_cu_info(adev);
 
 
@@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
 	}
 	}
 }
 }
 
 
-static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
-					 u32 se, u32 sh)
-{
-
-	u32 mask = 0, tmp, tmp1;
-	int i;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
-	tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	tmp &= 0xffff0000;
-
-	tmp |= tmp1;
-	tmp >>= 16;
-
-	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
-		mask <<= 1;
-		mask |= 1;
-	}
-
-	return (~tmp) & mask;
-}
-
 static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
 static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
 {
 {
-	u32 i, j, k, active_cu_number = 0;
-
-	u32 mask, counter, cu_bitmap;
-	u32 tmp = 0;
-
-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
-		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			mask = 1;
-			cu_bitmap = 0;
-			counter  = 0;
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
-				if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
-					if (counter < 2)
-						cu_bitmap |= mask;
-					counter++;
-				}
-				mask <<= 1;
-			}
+	u32 tmp;
 
 
-			active_cu_number += counter;
-			tmp |= (cu_bitmap << (i * 16 + j * 8));
-		}
-	}
+	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
 
 
-	WREG32(mmRLC_PG_AO_CU_MASK, tmp);
-	WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
+	tmp = RREG32(mmRLC_MAX_PG_CU);
+	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
+	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+	WREG32(mmRLC_MAX_PG_CU, tmp);
 }
 }
 
 
 static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
 static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 	int i, j, k, counter, active_cu_number = 0;
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+	unsigned disable_masks[4 * 2];
 
 
 	memset(cu_info, 0, sizeof(*cu_info));
 	memset(cu_info, 0, sizeof(*cu_info));
 
 
+	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
+	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			mask = 1;
 			mask = 1;
 			ao_bitmap = 0;
 			ao_bitmap = 0;
 			counter = 0;
 			counter = 0;
-			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			if (i < 4 && j < 2)
+				gfx_v6_0_set_user_cu_inactive_bitmap(
+					adev, disable_masks[i * 2 + j]);
+			bitmap = gfx_v6_0_get_cu_enabled(adev);
 			cu_info->bitmap[i][j] = bitmap;
 			cu_info->bitmap[i][j] = bitmap;
 
 
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < 16; k++) {
 				if (bitmap & mask) {
 				if (bitmap & mask) {
 					if (counter < 2)
 					if (counter < 2)
 						ao_bitmap |= mask;
 						ao_bitmap |= mask;
@@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 		}
 		}
 	}
 	}
 
 
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
 	cu_info->number = active_cu_number;
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
 }

+ 15 - 53
drivers/gpu/drm/amd/amdgpu/kv_dpm.c

@@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 
 
 	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
 	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
 		kv_dpm_powergate_vce(adev, false);
 		kv_dpm_powergate_vce(adev, false);
-		/* turn the clocks on when encoding */
-		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-						    AMD_CG_STATE_UNGATE);
-		if (ret)
-			return ret;
 		if (pi->caps_stable_p_state)
 		if (pi->caps_stable_p_state)
 			pi->vce_boot_level = table->count - 1;
 			pi->vce_boot_level = table->count - 1;
 		else
 		else
@@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
 			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
 							  PPSMC_MSG_VCEDPM_SetEnabledMask,
 							  PPSMC_MSG_VCEDPM_SetEnabledMask,
 							  (1 << pi->vce_boot_level));
 							  (1 << pi->vce_boot_level));
-
 		kv_enable_vce_dpm(adev, true);
 		kv_enable_vce_dpm(adev, true);
 	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
 	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
 		kv_enable_vce_dpm(adev, false);
 		kv_enable_vce_dpm(adev, false);
-		/* turn the clocks off when not encoding */
-		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-						    AMD_CG_STATE_GATE);
-		if (ret)
-			return ret;
 		kv_dpm_powergate_vce(adev, true);
 		kv_dpm_powergate_vce(adev, true);
 	}
 	}
 
 
@@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 	struct kv_power_info *pi = kv_get_pi(adev);
 	struct kv_power_info *pi = kv_get_pi(adev);
 	int ret;
 	int ret;
 
 
-	if (pi->uvd_power_gated == gate)
-		return;
-
 	pi->uvd_power_gated = gate;
 	pi->uvd_power_gated = gate;
 
 
 	if (gate) {
 	if (gate) {
-		if (pi->caps_uvd_pg) {
-			/* disable clockgating so we can properly shut down the block */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_CG_STATE_UNGATE);
-			/* shutdown the UVD block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_PG_STATE_GATE);
-			/* XXX: check for errors */
-		}
+		/* stop the UVD block */
+		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							AMD_PG_STATE_GATE);
 		kv_update_uvd_dpm(adev, gate);
 		kv_update_uvd_dpm(adev, gate);
 		if (pi->caps_uvd_pg)
 		if (pi->caps_uvd_pg)
 			/* power off the UVD block */
 			/* power off the UVD block */
 			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
 			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
 	} else {
 	} else {
-		if (pi->caps_uvd_pg) {
+		if (pi->caps_uvd_pg)
 			/* power on the UVD block */
 			/* power on the UVD block */
 			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 			/* re-init the UVD block */
 			/* re-init the UVD block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_PG_STATE_UNGATE);
-			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-							    AMD_CG_STATE_GATE);
-			/* XXX: check for errors */
-		}
 		kv_update_uvd_dpm(adev, gate);
 		kv_update_uvd_dpm(adev, gate);
+
+		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							AMD_PG_STATE_UNGATE);
 	}
 	}
 }
 }
 
 
 static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 {
 {
 	struct kv_power_info *pi = kv_get_pi(adev);
 	struct kv_power_info *pi = kv_get_pi(adev);
-	int ret;
 
 
 	if (pi->vce_power_gated == gate)
 	if (pi->vce_power_gated == gate)
 		return;
 		return;
 
 
 	pi->vce_power_gated = gate;
 	pi->vce_power_gated = gate;
 
 
-	if (gate) {
-		if (pi->caps_vce_pg) {
-			/* shutdown the VCE block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_PG_STATE_GATE);
-			/* XXX: check for errors */
-			/* power off the VCE block */
-			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-		}
-	} else {
-		if (pi->caps_vce_pg) {
-			/* power on the VCE block */
-			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
-			/* re-init the VCE block */
-			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							    AMD_PG_STATE_UNGATE);
-			/* XXX: check for errors */
-		}
-	}
+	if (!pi->caps_vce_pg)
+		return;
+
+	if (gate)
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+	else
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
 }
 }
 
 
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
@@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle)
 
 
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
 	kv_dpm_powergate_samu(adev, true);
-	kv_dpm_powergate_vce(adev, true);
-	kv_dpm_powergate_uvd(adev, true);
+
 	return 0;
 	return 0;
 }
 }
 
 

+ 73 - 17
drivers/gpu/drm/amd/amdgpu/si.c

@@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
 	{PA_SC_RASTER_CONFIG, false, true},
 	{PA_SC_RASTER_CONFIG, false, true},
 };
 };
 
 
-static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
-					  u32 se_num, u32 sh_num,
-					  u32 reg_offset)
+static uint32_t si_get_register_value(struct amdgpu_device *adev,
+				      bool indexed, u32 se_num,
+				      u32 sh_num, u32 reg_offset)
 {
 {
-	uint32_t val;
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		}
 
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
 
-	val = RREG32(reg_offset);
+		val = RREG32(reg_offset);
 
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
 }
-
 static int si_read_register(struct amdgpu_device *adev, u32 se_num,
 static int si_read_register(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value)
 			     u32 sh_num, u32 reg_offset, u32 *value)
 {
 {
@@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
 			continue;
 			continue;
 
 
 		if (!si_allowed_read_registers[i].untouched)
 		if (!si_allowed_read_registers[i].untouched)
-			*value = si_allowed_read_registers[i].grbm_indexed ?
-				 si_read_indexed_register(adev, se_num,
-							   sh_num, reg_offset) :
-				 RREG32(reg_offset);
+			*value = si_get_register_value(adev,
+						si_allowed_read_registers[i].grbm_indexed,
+						se_num, sh_num, reg_offset);
 		return 0;
 		return 0;
 	}
 	}
 	return -EINVAL;
 	return -EINVAL;

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/si_enums.h

@@ -143,8 +143,8 @@
 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
 
 #define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x02010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02011003
 
 #define PACKET3(op, n)  ((RADEON_PACKET_TYPE3 << 30) |                  \
                          (((op) & 0xFF) << 8) |                         \

+ 86 - 42
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

@@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle)
 
 	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-	r = uvd_v4_2_start(adev);
-	if (r)
-		goto done;
 
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
@@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle)
 	amdgpu_ring_commit(ring);
 
 done:
-
 	if (!r)
 		DRM_INFO("UVD initialized successfully.\n");
 
@@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
-	uvd_v4_2_stop(adev);
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v4_2_stop(adev);
+
 	ring->ready = false;
 
 	return 0;
@@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	uint32_t rb_bufsz;
 	int i, j, r;
+	u32 tmp;
 	/* disable byte swapping */
 	u32 lmi_swap_cntl = 0;
 	u32 mp_swap_cntl = 0;
 
 
-	WREG32(mmUVD_CGC_GATE, 0);
-	uvd_v4_2_set_dcm(adev, true);
+	/* set uvd busy */
+	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
 
 
-	uvd_v4_2_mc_resume(adev);
-
-	/* disable interupt */
-	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
-
-	/* Stall UMC and register bus before resetting VCPU */
-	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	mdelay(1);
-
-	/* put LMI, VCPU, RBC etc... into reset */
-	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
-	mdelay(5);
+	uvd_v4_2_set_dcm(adev, true);
+	WREG32(mmUVD_CGC_GATE, 0);
 
 
 	/* take UVD block out of reset */
 	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
 	mdelay(5);
 
 
-	/* initialize UVD memory controller */
-	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
-			     (1 << 21) | (1 << 9) | (1 << 20));
+	/* enable VCPU clock */
+	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+
+	/* disable interupt */
+	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
 
 
 #ifdef __BIG_ENDIAN
 	/* swap (8 in 32) RB and IB */
@@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 #endif
 	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+	/* initialize UVD memory controller */
+	WREG32(mmUVD_LMI_CTRL, 0x203108);
+
+	tmp = RREG32(mmUVD_MPC_CNTL);
+	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
 
 	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
 	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
@@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	WREG32(mmUVD_MPC_SET_ALU, 0);
 	WREG32(mmUVD_MPC_SET_MUX, 0x88);
 
-	/* take all subblocks out of reset, except VCPU */
-	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-	mdelay(5);
+	uvd_v4_2_mc_resume(adev);
 
 
-	/* enable VCPU clock */
-	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
+	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
 
 
 	/* enable UMC */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
 
 
-	/* boot up the VCPU */
-	WREG32(mmUVD_SOFT_RESET, 0);
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
 	mdelay(10);
 
 
 	for (i = 0; i < 10; ++i) {
@@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	/* enable interupt */
 	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
 
+	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
+
 	/* force RBC into idle state */
 	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
 
@@ -393,22 +389,54 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
  */
 static void uvd_v4_2_stop(struct amdgpu_device *adev)
 {
-	/* force RBC into idle state */
+	uint32_t i, j;
+	uint32_t status;
+
 	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
 
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_STATUS);
+			if (status & 2)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_LMI_STATUS);
+			if (status & 0xf)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+
 	/* Stall UMC and register bus before resetting VCPU */
 	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	mdelay(1);
 
 
-	/* put VCPU into reset */
-	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-	mdelay(5);
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_LMI_STATUS);
+			if (status & 0x240)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
 
 
-	/* disable VCPU clock */
-	WREG32(mmUVD_VCPU_CNTL, 0x0);
+	WREG32_P(0x3D49, 0, ~(1 << 2));
 
 
-	/* Unstall UMC and register bus */
-	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+	WREG32(mmUVD_STATUS, 0);
 
 
 	uvd_v4_2_set_dcm(adev, false);
 }
@@ -694,8 +722,24 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
+		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4)) {
+				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
+							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
+							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+				mdelay(20);
+			}
+		}
 		return 0;
 	} else {
+		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+			if (RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4) {
+				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
+						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
+						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+				mdelay(30);
+			}
+		}
 		return uvd_v4_2_start(adev);
 	}
 }

+ 10 - 14
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
 	uint32_t tmp;
 	int r;
 
-	r = uvd_v5_0_start(adev);
-	if (r)
-		goto done;
+	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+	uvd_v5_0_enable_mgcg(adev, true);
 
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
@@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
 	amdgpu_ring_write(ring, 3);
 
 	amdgpu_ring_commit(ring);
+
 done:
 	if (!r)
 		DRM_INFO("UVD initialized successfully.\n");
 
 	return r;
+
 }
 
 /**
@@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
-	uvd_v5_0_stop(adev);
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v5_0_stop(adev);
+
 	ring->ready = false;
 
 	return 0;
@@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
 
 	uvd_v5_0_mc_resume(adev);
 
-	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
-	uvd_v5_0_enable_mgcg(adev, true);
-
 	/* disable interupt */
 	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
 
@@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
 
 	/* Unstall UMC and register bus */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	WREG32(mmUVD_STATUS, 0);
 }
 
 /**
@@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
 	if (enable) {
 		/* wait for STATUS to clear */
 		if (uvd_v5_0_wait_for_idle(handle))
@@ -824,9 +823,6 @@ static int uvd_v5_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v5_0_stop(adev);
 		adev->uvd.is_powergated = true;

+ 8 - 13
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	uint32_t tmp;
 	int r;
 
-	r = uvd_v6_0_start(adev);
-	if (r)
-		goto done;
+	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+	uvd_v6_0_enable_mgcg(adev, true);
 
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
@@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
-	uvd_v6_0_stop(adev);
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v6_0_stop(adev);
+
 	ring->ready = false;
 
 	return 0;
@@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 	lmi_swap_cntl = 0;
 	mp_swap_cntl = 0;
 
-	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
-	uvd_v6_0_enable_mgcg(adev, true);
 	uvd_v6_0_mc_resume(adev);
 
 	/* disable interupt */
@@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
 
 	/* Unstall UMC and register bus */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	WREG32(mmUVD_STATUS, 0);
 }
 
 /**
@@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
 	if (enable) {
 		/* wait for STATUS to clear */
 		if (uvd_v6_0_wait_for_idle(handle))
@@ -1049,9 +1047,6 @@ static int uvd_v6_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-		return 0;
-
 	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
 
 	if (state == AMD_PG_STATE_GATE) {

+ 234 - 217
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c

@@ -42,10 +42,9 @@
 #define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vce_v2_0_wait_for_idle(void *handle);
+
 /**
  * vce_v2_0_ring_get_rptr - get read pointer
  *
@@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
 	return -ETIMEDOUT;
 }
 
+static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
+{
+	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+static void vce_v2_0_init_cg(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32(mmVCE_CLOCK_GATING_A);
+	tmp &= ~0xfff;
+	tmp |= ((0 << 0) | (4 << 4));
+	tmp |= 0x40000;
+	WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+	tmp &= ~0xfff;
+	tmp |= ((0 << 0) | (4 << 4));
+	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+	tmp = RREG32(mmVCE_CLOCK_GATING_B);
+	tmp |= 0x10;
+	tmp &= ~0x100000;
+	WREG32(mmVCE_CLOCK_GATING_B, tmp);
+}
+
+static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vce.gpu_addr;
+	uint32_t size;
+
+	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+
+	WREG32(mmVCE_LMI_CTRL, 0x00398000);
+	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+	WREG32(mmVCE_LMI_VM_CTRL, 0);
+
+	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
+	size = VCE_V2_0_FW_SIZE;
+	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = VCE_V2_0_STACK_SIZE;
+	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = VCE_V2_0_DATA_SIZE;
+	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
+}
+
+static bool vce_v2_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+}
+
+static int vce_v2_0_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	unsigned i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (vce_v2_0_is_idle(handle))
+			return 0;
+	}
+	return -ETIMEDOUT;
+}
+
 /**
  * vce_v2_0_start - start VCE block
  *
@@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int r;
 
-	vce_v2_0_mc_resume(adev);
-
 	/* set BUSY flag */
 	WREG32_P(mmVCE_STATUS, 1, ~1);
 
+	vce_v2_0_init_cg(adev);
+	vce_v2_0_disable_cg(adev);
+
+	vce_v2_0_mc_resume(adev);
+
 	ring = &adev->vce.ring[0];
 	WREG32(mmVCE_RB_RPTR, ring->wptr);
 	WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int vce_v2_0_stop(struct amdgpu_device *adev)
+{
+	int i, j;
+	int status;
+
+	if (vce_v2_0_lmi_clean(adev)) {
+		DRM_INFO("vce is not idle \n");
+		return 0;
+	}
+/*
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmVCE_FW_REG_STATUS);
+			if (!(status & 1))
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+*/
+	if (vce_v2_0_wait_for_idle(adev)) {
+		DRM_INFO("VCE is busy, Can't set clock gateing");
+		return 0;
+	}
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmVCE_LMI_STATUS);
+			if (status & 0x240)
+				break;
+			mdelay(1);
+		}
+		break;
+	}
+
+	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
+
+	WREG32(mmVCE_STATUS, 0);
+
+	return 0;
+}
+
+static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
+{
+	u32 tmp;
+
+	if (gated) {
+		tmp = RREG32(mmVCE_CLOCK_GATING_B);
+		tmp |= 0xe70000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+		tmp |= 0xff000000;
+		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		tmp &= ~0x3fc;
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+	} else {
+		tmp = RREG32(mmVCE_CLOCK_GATING_B);
+		tmp |= 0xe7;
+		tmp &= ~0xe70000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+		tmp |= 0x1fe000;
+		tmp &= ~0xff000000;
+		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		tmp |= 0x3fc;
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+	}
+}
+
+static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
+{
+	u32 orig, tmp;
+
+/* LMI_MC/LMI_UMC always set in dynamic,
+ * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
+ */
+	tmp = RREG32(mmVCE_CLOCK_GATING_B);
+	tmp &= ~0x00060006;
+
+/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
+	if (gated) {
+		tmp |= 0xe10000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+	} else {
+		tmp |= 0xe1;
+		tmp &= ~0xe10000;
+		WREG32(mmVCE_CLOCK_GATING_B, tmp);
+	}
+
+	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+	tmp &= ~0x1fe000;
+	tmp &= ~0xff000000;
+	if (tmp != orig)
+		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+	tmp &= ~0x3fc;
+	if (tmp != orig)
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
+
+	if(gated)
+		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+}
+
+static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
+								bool sw_cg)
+{
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(adev, true);
+		else
+			vce_v2_0_set_dyn_cg(adev, true);
+	} else {
+		vce_v2_0_disable_cg(adev);
+
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(adev, false);
+		else
+			vce_v2_0_set_dyn_cg(adev, false);
+	}
+}
+
 static int vce_v2_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = vce_v2_0_start(adev);
-	/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
-	if (r)
-		return 0;
-
+	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+	vce_v2_0_enable_mgcg(adev, true, false);
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
 
@@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle)
 	return r;
 }
 
-static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
-{
-	u32 tmp;
-
-	if (gated) {
-		tmp = RREG32(mmVCE_CLOCK_GATING_B);
-		tmp |= 0xe70000;
-		WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
-		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-		tmp |= 0xff000000;
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		tmp &= ~0x3fc;
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-
-		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	} else {
-		tmp = RREG32(mmVCE_CLOCK_GATING_B);
-		tmp |= 0xe7;
-		tmp &= ~0xe70000;
-		WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
-		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-		tmp |= 0x1fe000;
-		tmp &= ~0xff000000;
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		tmp |= 0x3fc;
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-	}
-}
-
-static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
-{
-	if (vce_v2_0_wait_for_idle(adev)) {
-		DRM_INFO("VCE is busy, Can't set clock gateing");
-		return;
-	}
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
-
-	if (vce_v2_0_lmi_clean(adev)) {
-		DRM_INFO("LMI is busy, Can't set clock gateing");
-		return;
-	}
-
-	WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-	WREG32_P(mmVCE_SOFT_RESET,
-		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-	WREG32(mmVCE_STATUS, 0);
-
-	if (gated)
-		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	/* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
-	if (gated) {
-		/* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
-		WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
-	} else {
-		/* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
-		WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
-	}
-
-	/* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
-	WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
-
-	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
-	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
-	if(!gated) {
-		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		mdelay(100);
-		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-		vce_v2_0_firmware_loaded(adev);
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-	}
-}
-
-static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
-{
-	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
-}
-
-static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
-{
-	bool sw_cg = false;
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
-		if (sw_cg)
-			vce_v2_0_set_sw_cg(adev, true);
-		else
-			vce_v2_0_set_dyn_cg(adev, true);
-	} else {
-		vce_v2_0_disable_cg(adev);
-
-		if (sw_cg)
-			vce_v2_0_set_sw_cg(adev, false);
-		else
-			vce_v2_0_set_dyn_cg(adev, false);
-	}
-}
-
-static void vce_v2_0_init_cg(struct amdgpu_device *adev)
-{
-	u32 tmp;
-
-	tmp = RREG32(mmVCE_CLOCK_GATING_A);
-	tmp &= ~0xfff;
-	tmp |= ((0 << 0) | (4 << 4));
-	tmp |= 0x40000;
-	WREG32(mmVCE_CLOCK_GATING_A, tmp);
-
-	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-	tmp &= ~0xfff;
-	tmp |= ((0 << 0) | (4 << 4));
-	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-	tmp = RREG32(mmVCE_CLOCK_GATING_B);
-	tmp |= 0x10;
-	tmp &= ~0x100000;
-	WREG32(mmVCE_CLOCK_GATING_B, tmp);
-}
-
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vce.gpu_addr;
-	uint32_t size;
-
-	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
-	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
-	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
-	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
-
-	WREG32(mmVCE_LMI_CTRL, 0x00398000);
-	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
-	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
-	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
-	WREG32(mmVCE_LMI_VM_CTRL, 0);
-
-	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
-	size = VCE_V2_0_FW_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
-
-	addr += size;
-	size = VCE_V2_0_STACK_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
-
-	addr += size;
-	size = VCE_V2_0_DATA_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
-
-	vce_v2_0_init_cg(adev);
-}
-
-static bool vce_v2_0_is_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
-}
-
-static int vce_v2_0_wait_for_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned i;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (vce_v2_0_is_idle(handle))
-			return 0;
-	}
-	return -ETIMEDOUT;
-}
-
 static int vce_v2_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
-	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
-	if (enable)
-		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-	else
-		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
-	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
 static int vce_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-
+	bool sw_cg = false;
 
 
-	vce_v2_0_set_bypass_mode(adev, enable);
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
-	if (state == AMD_CG_STATE_GATE)
+	if (state == AMD_CG_STATE_GATE) {
 		gate = true;
+		sw_cg = true;
+	}
 
 
-	vce_v2_0_enable_mgcg(adev, gate);
+	vce_v2_0_enable_mgcg(adev, gate, sw_cg);
 
 
 	return 0;
 }
@@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE)
-		/* XXX do we need a vce_v2_0_stop()? */
-		return 0;
+		return vce_v2_0_stop(adev);
 	else
 		return vce_v2_0_start(adev);
 }

+ 6 - 11
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c

@@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
-	vce_v3_0_override_vce_clock_gating(adev, true);
-	if (!(adev->flags & AMD_IS_APU))
-		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
-
 	ring = &adev->vce.ring[0];
 	WREG32(mmVCE_RB_RPTR, ring->wptr);
 	WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = vce_v3_0_start(adev);
-	if (r)
-		return r;
+	vce_v3_0_override_vce_clock_gating(adev, true);
+	if (!(adev->flags & AMD_IS_APU))
+		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
@@ -766,12 +762,11 @@ static int vce_v3_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
+		ret = vce_v3_0_stop(adev);
+		if (ret)
+			goto out;
 		adev->vce.is_powergated = true;
-		/* XXX do we need a vce_v3_0_stop()? */
 	} else {
 		ret = vce_v3_0_start(adev);
 		if (ret)

+ 1 - 0
drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h

@@ -1310,5 +1310,6 @@
 #define ixROM_SW_DATA_62                                                        0xc060012c
 #define ixROM_SW_DATA_63                                                        0xc0600130
 #define ixROM_SW_DATA_64                                                        0xc0600134
+#define ixCURRENT_PG_STATUS                                                     0xc020029c
 
 #endif /* SMU_7_0_1_D_H */

+ 29 - 45
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c

@@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
-	if (cz_hwmgr->uvd_power_gated == bgate)
-		return 0;
-
 	cz_hwmgr->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_clockgating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_GATE);
 		cgs_set_powergating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_GATE);
 		cz_dpm_update_uvd_dpm(hwmgr, true);
 		cz_dpm_powerdown_uvd(hwmgr);
 	} else {
 		cz_dpm_powerup_uvd(hwmgr);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_UNGATE);
 		cz_dpm_update_uvd_dpm(hwmgr, false);
 	}
 
@@ -193,47 +190,34 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_VCEPowerGating)) {
-		if (cz_hwmgr->vce_power_gated != bgate) {
-			if (bgate) {
-				cgs_set_clockgating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_CG_STATE_GATE);
-				cgs_set_powergating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_GATE);
-				cz_enable_disable_vce_dpm(hwmgr, false);
-				cz_dpm_powerdown_vce(hwmgr);
-				cz_hwmgr->vce_power_gated = true;
-			} else {
-				cz_dpm_powerup_vce(hwmgr);
-				cz_hwmgr->vce_power_gated = false;
-				cgs_set_powergating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_CG_STATE_UNGATE);
-				cgs_set_clockgating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_UNGATE);
-				cz_dpm_update_vce_dpm(hwmgr);
-				cz_enable_disable_vce_dpm(hwmgr, true);
-				return 0;
-			}
-		}
+	if (bgate) {
+		cgs_set_powergating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_CG_STATE_GATE);
+		cz_enable_disable_vce_dpm(hwmgr, false);
+		cz_dpm_powerdown_vce(hwmgr);
+		cz_hwmgr->vce_power_gated = true;
 	} else {
 	} else {
-		cz_hwmgr->vce_power_gated = bgate;
+		cz_dpm_powerup_vce(hwmgr);
+		cz_hwmgr->vce_power_gated = false;
+		cgs_set_clockgating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_PG_STATE_UNGATE);
+		cgs_set_powergating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_CG_STATE_UNGATE);
 		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, !bgate);
+		cz_enable_disable_vce_dpm(hwmgr, true);
 		return 0;
 	}
 
-	if (!cz_hwmgr->vce_power_gated)
-		cz_dpm_update_vce_dpm(hwmgr);
-
 	return 0;
 }
 

+ 13 - 10
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c

@@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_clockgating_state(hwmgr->device,
-				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_CG_STATE_GATE);
 		cgs_set_powergating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(hwmgr->device,
+				AMD_IP_BLOCK_TYPE_UVD,
+				AMD_CG_STATE_GATE);
 		smu7_update_uvd_dpm(hwmgr, true);
 		smu7_powerdown_uvd(hwmgr);
 	} else {
 		smu7_powerup_uvd(hwmgr);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_UNGATE);
 		smu7_update_uvd_dpm(hwmgr, false);
 	}
 
@@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (data->vce_power_gated == bgate)
-		return 0;
-
 	data->vce_power_gated = bgate;
 
 	if (bgate) {
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_VCE,
+						AMD_PG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);
@@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		smu7_powerdown_vce(hwmgr);
 	} else {
 		smu7_powerup_vce(hwmgr);
-		smu7_update_vce_dpm(hwmgr, false);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_VCE,
+						AMD_PG_STATE_UNGATE);
+		smu7_update_vce_dpm(hwmgr, false);
 	}
 	return 0;
 }

+ 10 - 12
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c

@@ -2624,6 +2624,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 		smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
 		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
 		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
+
 		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 		hwmgr->dpm_level = level;
@@ -2633,9 +2634,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 		break;
 	}
 
-	if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
-	else
+	else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
 
 	return 0;
@@ -4397,16 +4398,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
 			return -EINVAL;
 		dep_sclk_table = table_info->vdd_dep_on_sclk;
-		for (i = 0; i < dep_sclk_table->count; i++) {
+		for (i = 0; i < dep_sclk_table->count; i++)
 			clocks->clock[i] = dep_sclk_table->entries[i].clk;
-			clocks->count++;
-		}
+		clocks->count = dep_sclk_table->count;
 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
 		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
-		for (i = 0; i < sclk_table->count; i++) {
+		for (i = 0; i < sclk_table->count; i++)
 			clocks->clock[i] = sclk_table->entries[i].clk;
-			clocks->count++;
-		}
+		clocks->count = sclk_table->count;
 	}
 
 	return 0;
@@ -4440,14 +4439,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 			clocks->clock[i] = dep_mclk_table->entries[i].clk;
 			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
 						dep_mclk_table->entries[i].clk);
-			clocks->count++;
 		}
+		clocks->count = dep_mclk_table->count;
 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
 		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
-		for (i = 0; i < mclk_table->count; i++) {
+		for (i = 0; i < mclk_table->count; i++)
 			clocks->clock[i] = mclk_table->entries[i].clk;
-			clocks->count++;
-		}
+		clocks->count = mclk_table->count;
 	}
 	return 0;
 }

+ 2 - 0
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c

@@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
 
 

+ 1 - 2
drivers/gpu/drm/arc/arcpgu_drv.c

@@ -135,8 +135,7 @@ static int arcpgu_load(struct drm_device *drm)
 	drm_kms_helper_poll_init(drm);
 
 	arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
-					      drm->mode_config.num_crtc,
-					      drm->mode_config.num_connector);
+					   drm->mode_config.num_connector);
 	if (IS_ERR(arcpgu->fbdev)) {
 		ret = PTR_ERR(arcpgu->fbdev);
 		arcpgu->fbdev = NULL;

+ 1 - 1
drivers/gpu/drm/arm/hdlcd_drv.c

@@ -349,7 +349,7 @@ static int hdlcd_drm_bind(struct device *dev)
 	drm_mode_config_reset(drm);
 	drm_kms_helper_poll_init(drm);
 
-	hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+	hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
 					  drm->mode_config.num_connector);
 
 	if (IS_ERR(hdlcd->fbdev)) {

+ 1 - 1
drivers/gpu/drm/arm/malidp_drv.c

@@ -457,7 +457,7 @@ static int malidp_bind(struct device *dev)
 
 	drm_mode_config_reset(drm);
 
-	malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+	malidp->fbdev = drm_fbdev_cma_init(drm, 32,
 					   drm->mode_config.num_connector);
 
 	if (IS_ERR(malidp->fbdev)) {

+ 1 - 1
drivers/gpu/drm/armada/armada_fbdev.c

@@ -137,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, fbh, 1, 1);
+	ret = drm_fb_helper_init(dev, fbh, 1);
 	if (ret) {
 		DRM_ERROR("failed to initialize drm fb helper\n");
 		goto err_fb_helper;

+ 2 - 2
drivers/gpu/drm/armada/armada_gem.c

@@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 			return -ENOSPC;
 
 		mutex_lock(&priv->linear_lock);
-		ret = drm_mm_insert_node(&priv->linear, node, size, align,
-					 DRM_MM_SEARCH_DEFAULT);
+		ret = drm_mm_insert_node_generic(&priv->linear, node,
+						 size, align, 0, 0);
 		mutex_unlock(&priv->linear_lock);
 		if (ret) {
 			kfree(node);

+ 1 - 2
drivers/gpu/drm/ast/ast_fb.c

@@ -315,8 +315,7 @@ int ast_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, &afbdev->helper,
-				 1, 1);
+	ret = drm_fb_helper_init(dev, &afbdev->helper, 1);
 	if (ret)
 		goto free;
 

+ 0 - 1
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c

@@ -647,7 +647,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 	platform_set_drvdata(pdev, dev);
 
 	dc->fbdev = drm_fbdev_cma_init(dev, 24,
-			dev->mode_config.num_crtc,
 			dev->mode_config.num_connector);
 	if (IS_ERR(dc->fbdev))
 		dc->fbdev = NULL;

+ 10 - 0
drivers/gpu/drm/bochs/bochs_drv.c

@@ -12,6 +12,10 @@
 
 #include "bochs.h"
 
+static int bochs_modeset = -1;
+module_param_named(modeset, bochs_modeset, int, 0444);
+MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
+
 static bool enable_fbdev = true;
 module_param_named(fbdev, enable_fbdev, bool, 0444);
 MODULE_PARM_DESC(fbdev, "register fbdev device");
@@ -214,6 +218,12 @@ static struct pci_driver bochs_pci_driver = {
 
 static int __init bochs_init(void)
 {
+	if (vgacon_text_force() && bochs_modeset == -1)
+		return -EINVAL;
+
+	if (bochs_modeset == 0)
+		return -EINVAL;
+
 	return drm_pci_init(&bochs_driver, &bochs_pci_driver);
 }
 

+ 1 - 2
drivers/gpu/drm/bochs/bochs_fbdev.c

@@ -169,8 +169,7 @@ int bochs_fbdev_init(struct bochs_device *bochs)
 	drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
 			      &bochs_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
-				 1, 1);
+	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
 	if (ret)
 		return ret;
 

+ 815 - 134
drivers/gpu/drm/bridge/sil-sii8620.c

@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <asm/unaligned.h>
+
 #include <drm/bridge/mhl.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -28,7 +30,10 @@
 
 #include "sil-sii8620.h"
 
-#define VAL_RX_HDMI_CTRL2_DEFVAL	VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+#define SII8620_BURST_BUF_LEN 288
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+#define MHL1_MAX_LCLK 225000
+#define MHL3_MAX_LCLK 600000
 
 enum sii8620_mode {
 	CM_DISCONNECTED,
@@ -59,6 +64,9 @@ struct sii8620 {
 	struct regulator_bulk_data supplies[2];
 	struct mutex lock; /* context lock, protects fields below */
 	int error;
+	int pixel_clock;
+	unsigned int use_packed_pixel:1;
+	int video_code;
 	enum sii8620_mode mode;
 	enum sii8620_sink_type sink_type;
 	u8 cbus_status;
@@ -66,11 +74,20 @@ struct sii8620 {
 	u8 xstat[MHL_XDS_SIZE];
 	u8 devcap[MHL_DCAP_SIZE];
 	u8 xdevcap[MHL_XDC_SIZE];
-	u8 avif[19];
+	u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
 	struct edid *edid;
 	unsigned int gen2_write_burst:1;
 	enum sii8620_mt_state mt_state;
 	struct list_head mt_queue;
+	struct {
+		int r_size;
+		int r_count;
+		int rx_ack;
+		int rx_count;
+		u8 rx_buf[32];
+		int tx_count;
+		u8 tx_buf[32];
+	} burst;
 };
 
 struct sii8620_mt_msg;
@@ -78,12 +95,15 @@ struct sii8620_mt_msg;
 typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
 				  struct sii8620_mt_msg *msg);
 
+typedef void (*sii8620_cb)(struct sii8620 *ctx, int ret);
+
 struct sii8620_mt_msg {
 	struct list_head node;
 	u8 reg[4];
 	u8 ret;
 	sii8620_mt_msg_cb send;
 	sii8620_mt_msg_cb recv;
+	sii8620_cb continuation;
 };
 
 static const u8 sii8620_i2c_page[] = {
@@ -101,6 +121,7 @@ static void sii8620_fetch_edid(struct sii8620 *ctx);
 static void sii8620_set_upstream_edid(struct sii8620 *ctx);
 static void sii8620_enable_hpd(struct sii8620 *ctx);
 static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+static void sii8620_disconnect(struct sii8620 *ctx);
 
 static int sii8620_clear_error(struct sii8620 *ctx)
 {
@@ -227,6 +248,11 @@ static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
 	sii8620_write(ctx, addr, val);
 }
 
+static inline bool sii8620_is_mhl3(struct sii8620 *ctx)
+{
+	return ctx->mode >= CM_MHL3;
+}
+
 static void sii8620_mt_cleanup(struct sii8620 *ctx)
 {
 	struct sii8620_mt_msg *msg, *n;
@@ -251,9 +277,11 @@ static void sii8620_mt_work(struct sii8620 *ctx)
 		ctx->mt_state = MT_STATE_READY;
 		msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
 				       node);
+		list_del(&msg->node);
 		if (msg->recv)
 			msg->recv(ctx, msg);
-		list_del(&msg->node);
+		if (msg->continuation)
+			msg->continuation(ctx, msg->ret);
 		kfree(msg);
 	}
 
@@ -266,9 +294,59 @@ static void sii8620_mt_work(struct sii8620 *ctx)
 		msg->send(ctx, msg);
 }
 
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+	u8 ctrl = BIT_MDT_RCV_CTRL_MDT_RCV_EN;
+
+	if (ctx->gen2_write_burst)
+		return;
+
+	if (ctx->mode >= CM_MHL1)
+		ctrl |= BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN;
+
+	sii8620_write_seq(ctx,
+		REG_MDT_RCV_TIMEOUT, 100,
+		REG_MDT_RCV_CTRL, ctrl
+	);
+	ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+	if (!ctx->gen2_write_burst)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MDT_XMIT_CTRL, 0,
+		REG_MDT_RCV_CTRL, 0
+	);
+	ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+			| BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+			| BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+			| BIT_MDT_XMIT_SM_ERROR,
+		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+			| BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+			| BIT_MDT_RFIFO_DATA_RDY
+	);
+	sii8620_enable_gen2_write_burst(ctx);
+}
+
 static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
 				    struct sii8620_mt_msg *msg)
 {
+	if (msg->reg[0] == MHL_SET_INT &&
+	    msg->reg[1] == MHL_INT_REG(RCHANGE) &&
+	    msg->reg[2] == MHL_INT_RC_FEAT_REQ)
+		sii8620_enable_gen2_write_burst(ctx);
+	else
+		sii8620_disable_gen2_write_burst(ctx);
+
 	switch (msg->reg[0]) {
 	case MHL_WRITE_STAT:
 	case MHL_SET_INT:
@@ -281,6 +359,12 @@ static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
 		sii8620_write(ctx, REG_MSC_COMMAND_START,
 			      BIT_MSC_COMMAND_START_MSC_MSG);
 		break;
+	case MHL_READ_DEVCAP_REG:
+	case MHL_READ_XDEVCAP_REG:
+		sii8620_write(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg[1]);
+		sii8620_write(ctx, REG_MSC_COMMAND_START,
+			      BIT_MSC_COMMAND_START_READ_DEVCAP);
+		break;
 	default:
 		dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
 			msg->reg[0]);
@@ -299,6 +383,21 @@ static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
 	return msg;
 }
 
+static void sii8620_mt_set_cont(struct sii8620 *ctx, sii8620_cb cont)
+{
+	struct sii8620_mt_msg *msg;
+
+	if (ctx->error)
+		return;
+
+	if (list_empty(&ctx->mt_queue)) {
+		ctx->error = -EINVAL;
+		return;
+	}
+	msg = list_last_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+	msg->continuation = cont;
+}
+
 static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
 {
 	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
@@ -358,7 +457,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
 	}
 }
 
 
-static void sii8620_mr_devcap(struct sii8620 *ctx)
+static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
 {
 	static const char * const sink_str[] = {
 		[SINK_NONE] = "NONE",
@@ -366,23 +465,10 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
 		[SINK_DVI] = "DVI"
 	};
 
 
-	u8 dcap[MHL_DCAP_SIZE];
 	char sink_name[20];
 	struct device *dev = ctx->dev;
 
 
-	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
-	if (ctx->error < 0)
-		return;
-
-	dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
-	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
-		 dcap[MHL_DCAP_MHL_VERSION] / 16,
-		 dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
-		 dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
-		 dcap[MHL_DCAP_DEVICE_ID_L]);
-	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
-
-	if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+	if (ret < 0)
 		return;
 
 
 	sii8620_fetch_edid(ctx);
@@ -401,18 +487,76 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
 
 
 	dev_info(dev, "detected sink(type: %s): %s\n",
 		 sink_str[ctx->sink_type], sink_name);
+}
+
+static void sii8620_hsic_init(struct sii8620 *ctx)
+{
+	if (!sii8620_is_mhl3(ctx))
+		return;
+
+	sii8620_write(ctx, REG_FCGC,
+		BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
+	sii8620_setbits(ctx, REG_HRXCTRL3,
+		BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
+	sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
+	sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
+	sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
+	sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
+	sii8620_write_seq_static(ctx,
+		REG_TDMLLCTL, 0,
+		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
+			BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
+		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
+		REG_HRXINTL, 0xff,
+		REG_HRXINTH, 0xff,
+		REG_TTXINTL, 0xff,
+		REG_TTXINTH, 0xff,
+		REG_TRXINTL, 0xff,
+		REG_TRXINTH, 0xff,
+		REG_HTXINTL, 0xff,
+		REG_HTXINTH, 0xff,
+		REG_FCINTR0, 0xff,
+		REG_FCINTR1, 0xff,
+		REG_FCINTR2, 0xff,
+		REG_FCINTR3, 0xff,
+		REG_FCINTR4, 0xff,
+		REG_FCINTR5, 0xff,
+		REG_FCINTR6, 0xff,
+		REG_FCINTR7, 0xff
+	);
+}
+
+static void sii8620_edid_read(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
 	sii8620_set_upstream_edid(ctx);
+	sii8620_hsic_init(ctx);
 	sii8620_enable_hpd(ctx);
 }
 
 
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+	u8 dcap[MHL_DCAP_SIZE];
+	struct device *dev = ctx->dev;
+
+	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+	if (ctx->error < 0)
+		return;
+
+	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+		 dcap[MHL_DCAP_MHL_VERSION] / 16,
+		 dcap[MHL_DCAP_MHL_VERSION] % 16,
+		 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
+		 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
+	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+}
+
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
 {
 	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
 			 MHL_XDC_SIZE);
-
-	sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
-			      MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
-	sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
 }
 
 static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
@@ -450,6 +594,197 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
 	msg->recv = sii8620_mt_read_devcap_recv;
 }
 
+static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
+		struct sii8620_mt_msg *msg)
+{
+	u8 reg = msg->reg[0] & 0x7f;
+
+	if (msg->reg[0] & 0x80)
+		ctx->xdevcap[reg] = msg->ret;
+	else
+		ctx->devcap[reg] = msg->ret;
+}
+
+static void sii8620_mt_read_devcap_reg(struct sii8620 *ctx, u8 reg)
+{
+	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+	if (!msg)
+		return;
+
+	msg->reg[0] = (reg & 0x80) ? MHL_READ_XDEVCAP_REG : MHL_READ_DEVCAP_REG;
+	msg->reg[1] = reg;
+	msg->send = sii8620_mt_msc_cmd_send;
+	msg->recv = sii8620_mt_read_devcap_reg_recv;
+}
+
+static inline void sii8620_mt_read_xdevcap_reg(struct sii8620 *ctx, u8 reg)
+{
+	sii8620_mt_read_devcap_reg(ctx, reg | 0x80);
+}
+
+static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len)
+{
+	u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
+	int size = len + 2;
+
+	if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+		dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
+		ctx->error = -EINVAL;
+		return NULL;
+	}
+
+	ctx->burst.tx_count += size;
+	buf[1] = len;
+
+	return buf + 2;
+}
+
+static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len)
+{
+	u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
+	int size = len + 1;
+
+	if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+		dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
+		ctx->error = -EINVAL;
+		return NULL;
+	}
+
+	ctx->burst.rx_count += size;
+	buf[0] = len;
+
+	return buf + 1;
+}
+
+static void sii8620_burst_send(struct sii8620 *ctx)
+{
+	int tx_left = ctx->burst.tx_count;
+	u8 *d = ctx->burst.tx_buf;
+
+	while (tx_left > 0) {
+		int len = d[1] + 2;
+
+		if (ctx->burst.r_count + len > ctx->burst.r_size)
+			break;
+		d[0] = min(ctx->burst.rx_ack, 255);
+		ctx->burst.rx_ack -= d[0];
+		sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, d, len);
+		ctx->burst.r_count += len;
+		tx_left -= len;
+		d += len;
+	}
+
+	ctx->burst.tx_count = tx_left;
+
+	while (ctx->burst.rx_ack > 0) {
+		u8 b[2] = { min(ctx->burst.rx_ack, 255), 0 };
+
+		if (ctx->burst.r_count + 2 > ctx->burst.r_size)
+			break;
+		ctx->burst.rx_ack -= b[0];
+		sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, b, 2);
+		ctx->burst.r_count += 2;
+	}
+}
+
+static void sii8620_burst_receive(struct sii8620 *ctx)
+{
+	u8 buf[3], *d;
+	int count;
+
+	sii8620_read_buf(ctx, REG_EMSCRFIFOBCNTL, buf, 2);
+	count = get_unaligned_le16(buf);
+	while (count > 0) {
+		int len = min(count, 3);
+
+		sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, buf, len);
+		count -= len;
+		ctx->burst.rx_ack += len - 1;
+		ctx->burst.r_count -= buf[1];
+		if (ctx->burst.r_count < 0)
+			ctx->burst.r_count = 0;
+
+		if (len < 3 || !buf[2])
+			continue;
+
+		len = buf[2];
+		d = sii8620_burst_get_rx_buf(ctx, len);
+		if (!d)
+			continue;
+		sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, d, len);
+		count -= len;
+		ctx->burst.rx_ack += len;
+	}
+}
+
+static void sii8620_burst_tx_rbuf_info(struct sii8620 *ctx, int size)
+{
+	struct mhl_burst_blk_rcv_buffer_info *d =
+		sii8620_burst_get_tx_buf(ctx, sizeof(*d));
+	if (!d)
+		return;
+
+	d->id = cpu_to_be16(MHL_BURST_ID_BLK_RCV_BUFFER_INFO);
+	d->size = cpu_to_le16(size);
+}
+
+static u8 sii8620_checksum(void *ptr, int size)
+{
+	u8 *d = ptr, sum = 0;
+
+	while (size--)
+		sum += *d++;
+
+	return sum;
+}
+
+static void sii8620_mhl_burst_hdr_set(struct mhl3_burst_header *h,
+	enum mhl_burst_id id)
+{
+	h->id = cpu_to_be16(id);
+	h->total_entries = 1;
+	h->sequence_index = 1;
+}
+
+static void sii8620_burst_tx_bits_per_pixel_fmt(struct sii8620 *ctx, u8 fmt)
+{
+	struct mhl_burst_bits_per_pixel_fmt *d;
+	const int size = sizeof(*d) + sizeof(d->desc[0]);
+
+	d = sii8620_burst_get_tx_buf(ctx, size);
+	if (!d)
+		return;
+
+	sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_BITS_PER_PIXEL_FMT);
+	d->num_entries = 1;
+	d->desc[0].stream_id = 0;
+	d->desc[0].pixel_format = fmt;
+	d->hdr.checksum -= sii8620_checksum(d, size);
+}
+
+static void sii8620_burst_rx_all(struct sii8620 *ctx)
+{
+	u8 *d = ctx->burst.rx_buf;
+	int count = ctx->burst.rx_count;
+
+	while (count-- > 0) {
+		int len = *d++;
+		int id = get_unaligned_be16(&d[0]);
+
+		switch (id) {
+		case MHL_BURST_ID_BLK_RCV_BUFFER_INFO:
+			ctx->burst.r_size = get_unaligned_le16(&d[2]);
+			break;
+		default:
+			break;
+		}
+		count -= len;
+		d += len;
+	}
+	ctx->burst.rx_count = 0;
+}
+
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
 	u8 lm_ddc, ddc_cmd, int3, cbus;
@@ -537,12 +872,12 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
 				edid = new_edid;
 			}
 		}
-
-		if (fetched + FETCH_SIZE == edid_len)
-			sii8620_write(ctx, REG_INTR3, int3);
 	}
 
-	sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+	sii8620_write_seq(ctx,
+		REG_INTR3_MASK, BIT_DDC_CMD_DONE,
+		REG_LM_DDC, lm_ddc
+	);
 
 end:
 	kfree(ctx->edid);
@@ -641,11 +976,10 @@ static void sii8620_hw_reset(struct sii8620 *ctx)
 
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
-	sii8620_write_seq_static(ctx,
-		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
-			| BIT_PWD_SRST_CBUS_RST_SW_EN,
-		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
-	);
+	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+		      | BIT_PWD_SRST_CBUS_RST_SW_EN);
+	usleep_range(10000, 20000);
+	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN);
 }
 
 static void sii8620_set_auto_zone(struct sii8620 *ctx)
@@ -683,48 +1017,208 @@ static void sii8620_stop_video(struct sii8620 *ctx)
 			| BIT_TPI_SC_TPI_AV_MUTE;
 		break;
 	case SINK_HDMI:
+	default:
 		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
 			| BIT_TPI_SC_TPI_AV_MUTE
 			| BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
 		break;
-	default:
-		return;
 	}
 
 	sii8620_write(ctx, REG_TPI_SC, val);
 }
 
+static void sii8620_set_format(struct sii8620 *ctx)
+{
+	u8 out_fmt;
+
+	if (sii8620_is_mhl3(ctx)) {
+		sii8620_setbits(ctx, REG_M3_P0CTRL,
+				BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
+				ctx->use_packed_pixel ? ~0 : 0);
+	} else {
+		if (ctx->use_packed_pixel)
+			sii8620_write_seq_static(ctx,
+				REG_VID_MODE, BIT_VID_MODE_M1080P,
+				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+				REG_MHLTX_CTL6, 0x60
+			);
+		else
+			sii8620_write_seq_static(ctx,
+				REG_VID_MODE, 0,
+				REG_MHL_TOP_CTL, 1,
+				REG_MHLTX_CTL6, 0xa0
+			);
+	}
+
+	if (ctx->use_packed_pixel)
+		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
+			BIT_TPI_OUTPUT_CSCMODE709;
+	else
+		out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
+	sii8620_write_seq(ctx,
+		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+		REG_TPI_OUTPUT, out_fmt,
+	);
+}
+
+static int mhl3_infoframe_init(struct mhl3_infoframe *frame)
+{
+	memset(frame, 0, sizeof(*frame));
+
+	frame->version = 3;
+	frame->hev_format = -1;
+	return 0;
+}
+
+static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
+		 void *buffer, size_t size)
+{
+	const int frm_len = HDMI_INFOFRAME_HEADER_SIZE + MHL3_INFOFRAME_SIZE;
+	u8 *ptr = buffer;
+
+	if (size < frm_len)
+		return -ENOSPC;
+
+	memset(buffer, 0, size);
+	ptr[0] = HDMI_INFOFRAME_TYPE_VENDOR;
+	ptr[1] = frame->version;
+	ptr[2] = MHL3_INFOFRAME_SIZE;
+	ptr[4] = MHL3_IEEE_OUI & 0xff;
+	ptr[5] = (MHL3_IEEE_OUI >> 8) & 0xff;
+	ptr[6] = (MHL3_IEEE_OUI >> 16) & 0xff;
+	ptr[7] = frame->video_format & 0x3;
+	ptr[7] |= (frame->format_type & 0x7) << 2;
+	ptr[7] |= frame->sep_audio ? BIT(5) : 0;
+	if (frame->hev_format >= 0) {
+		ptr[9] = 1;
+		ptr[10] = (frame->hev_format >> 8) & 0xff;
+		ptr[11] = frame->hev_format & 0xff;
+	}
+	if (frame->av_delay) {
+		bool sign = frame->av_delay < 0;
+		int delay = sign ? -frame->av_delay : frame->av_delay;
+
+		ptr[12] = (delay >> 16) & 0xf;
+		if (sign)
+			ptr[12] |= BIT(4);
+		ptr[13] = (delay >> 8) & 0xff;
+		ptr[14] = delay & 0xff;
+	}
+	ptr[3] -= sii8620_checksum(buffer, frm_len);
+	return frm_len;
+}
+
+static void sii8620_set_infoframes(struct sii8620 *ctx)
+{
+	struct mhl3_infoframe mhl_frm;
+	union hdmi_infoframe frm;
+	u8 buf[31];
+	int ret;
+
+	if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
+		sii8620_write(ctx, REG_TPI_SC,
+			BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
+			ARRAY_SIZE(ctx->avif) - 3);
+		sii8620_write(ctx, REG_PKT_FILTER_0,
+			BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
+			BIT_PKT_FILTER_0_DROP_MPEG_PKT |
+			BIT_PKT_FILTER_0_DROP_GCP_PKT,
+			BIT_PKT_FILTER_1_DROP_GEN_PKT);
+		return;
+	}
+
+	ret = hdmi_avi_infoframe_init(&frm.avi);
+	frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+	frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+	frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
+	frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
+	frm.avi.video_code = ctx->video_code;
+	if (!ret)
+		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+	if (ret > 0)
+		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+	sii8620_write(ctx, REG_PKT_FILTER_0,
+		BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
+		BIT_PKT_FILTER_0_DROP_MPEG_PKT |
+		BIT_PKT_FILTER_0_DROP_AVI_PKT |
+		BIT_PKT_FILTER_0_DROP_GCP_PKT,
+		BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS |
+		BIT_PKT_FILTER_1_DROP_GEN_PKT |
+		BIT_PKT_FILTER_1_DROP_VSIF_PKT);
+
+	sii8620_write(ctx, REG_TPI_INFO_FSEL, BIT_TPI_INFO_FSEL_EN
+		| BIT_TPI_INFO_FSEL_RPT | VAL_TPI_INFO_FSEL_VSI);
+	ret = mhl3_infoframe_init(&mhl_frm);
+	if (!ret)
+		ret = mhl3_infoframe_pack(&mhl_frm, buf, ARRAY_SIZE(buf));
+	sii8620_write_buf(ctx, REG_TPI_INFO_B0, buf, ret);
+}
+
 static void sii8620_start_hdmi(struct sii8620 *ctx)
 {
 	sii8620_write_seq_static(ctx,
 		REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
 			| BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
 		REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
-			| BIT_VID_OVRRD_M1080P_OVRRD,
-		REG_VID_MODE, 0,
-		REG_MHL_TOP_CTL, 0x1,
-		REG_MHLTX_CTL6, 0xa0,
-		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
-		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
-	);
-
-	sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-			      MHL_DST_LM_CLK_MODE_NORMAL |
-			      MHL_DST_LM_PATH_ENABLED);
+			| BIT_VID_OVRRD_M1080P_OVRRD);
+	sii8620_set_format(ctx);
 
-	sii8620_set_auto_zone(ctx);
+	if (!sii8620_is_mhl3(ctx)) {
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+			MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+		sii8620_set_auto_zone(ctx);
+	} else {
+		static const struct {
+			int max_clk;
+			u8 zone;
+			u8 link_rate;
+			u8 rrp_decode;
+		} clk_spec[] = {
+			{ 150000, VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS,
+			  MHL_XDS_LINK_RATE_1_5_GBPS, 0x38 },
+			{ 300000, VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS,
+			  MHL_XDS_LINK_RATE_3_0_GBPS, 0x40 },
+			{ 600000, VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS,
+			  MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
+		};
+		u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
+		int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+			if (clk < clk_spec[i].max_clk)
+				break;
 
-	sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+		if (100 * clk >= 98 * clk_spec[i].max_clk)
+			p0_ctrl |= BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN;
 
-	sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
-			  ARRAY_SIZE(ctx->avif));
+		sii8620_burst_tx_bits_per_pixel_fmt(ctx, ctx->use_packed_pixel);
+		sii8620_burst_send(ctx);
+		sii8620_write_seq(ctx,
+			REG_MHL_DP_CTL0, 0xf0,
+			REG_MHL3_TX_ZONE_CTL, clk_spec[i].zone);
+		sii8620_setbits(ctx, REG_M3_P0CTRL,
+			BIT_M3_P0CTRL_MHL3_P0_PORT_EN
+			| BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN, p0_ctrl);
+		sii8620_setbits(ctx, REG_M3_POSTM, MSK_M3_POSTM_RRP_DECODE,
+			clk_spec[i].rrp_decode);
+		sii8620_write_seq_static(ctx,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
+				| BIT_M3_CTRL_H2M_SWRST,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
+		);
+		sii8620_mt_write_stat(ctx, MHL_XDS_REG(AVLINK_MODE_CONTROL),
+			clk_spec[i].link_rate);
+	}
 
-	sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+	sii8620_set_infoframes(ctx);
 }
 
 static void sii8620_start_video(struct sii8620 *ctx)
 {
-	if (ctx->mode < CM_MHL3)
+	if (!sii8620_is_mhl3(ctx))
 		sii8620_stop_video(ctx);
 
 	switch (ctx->sink_type) {
@@ -757,44 +1251,6 @@ static void sii8620_enable_hpd(struct sii8620 *ctx)
 	);
 }
 
-static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
-{
-	if (ctx->gen2_write_burst)
-		return;
-
-	sii8620_write_seq_static(ctx,
-		REG_MDT_RCV_TIMEOUT, 100,
-		REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
-	);
-	ctx->gen2_write_burst = 1;
-}
-
-static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
-{
-	if (!ctx->gen2_write_burst)
-		return;
-
-	sii8620_write_seq_static(ctx,
-		REG_MDT_XMIT_CTRL, 0,
-		REG_MDT_RCV_CTRL, 0
-	);
-	ctx->gen2_write_burst = 0;
-}
-
-static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
-{
-	sii8620_write_seq_static(ctx,
-		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
-			| BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
-			| BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
-			| BIT_MDT_XMIT_SM_ERROR,
-		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
-			| BIT_MDT_IDLE_AFTER_HAWB_DISABLE
-			| BIT_MDT_RFIFO_DATA_RDY
-	);
-	sii8620_enable_gen2_write_burst(ctx);
-}
-
 static void sii8620_mhl_discover(struct sii8620 *ctx)
 {
 	sii8620_write_seq_static(ctx,
@@ -838,7 +1294,7 @@ static void sii8620_mhl_discover(struct sii8620 *ctx)
 
 static void sii8620_peer_specific_init(struct sii8620 *ctx)
 {
-	if (ctx->mode == CM_MHL3)
+	if (sii8620_is_mhl3(ctx))
 		sii8620_write_seq_static(ctx,
 			REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
 			REG_EMSCINTRMASK1,
@@ -948,21 +1404,51 @@ static void sii8620_mhl_init(struct sii8620 *ctx)
 	);
 	sii8620_disable_gen2_write_burst(ctx);
 
-	/* currently MHL3 is not supported, so we force version to 0 */
-	sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), SII8620_MHL_VERSION);
 	sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
 			      MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
 			      | MHL_DST_CONN_POW_STAT);
 	sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
 }
 
+static void sii8620_emsc_enable(struct sii8620 *ctx)
+{
+	u8 reg;
+
+	sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_EMSC_EN
+					 | BIT_GENCTL_CLR_EMSC_RFIFO
+					 | BIT_GENCTL_CLR_EMSC_XFIFO, ~0);
+	sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_CLR_EMSC_RFIFO
+					 | BIT_GENCTL_CLR_EMSC_XFIFO, 0);
+	sii8620_setbits(ctx, REG_COMMECNT, BIT_COMMECNT_I2C_TO_EMSC_EN, ~0);
+	reg = sii8620_readb(ctx, REG_EMSCINTR);
+	sii8620_write(ctx, REG_EMSCINTR, reg);
+	sii8620_write(ctx, REG_EMSCINTRMASK, BIT_EMSCINTR_SPI_DVLD);
+}
+
+static int sii8620_wait_for_fsm_state(struct sii8620 *ctx, u8 state)
+{
+	int i;
+
+	for (i = 0; i < 10; ++i) {
+		u8 s = sii8620_readb(ctx, REG_COC_STAT_0);
+
+		if ((s & MSK_COC_STAT_0_FSM_STATE) == state)
+			return 0;
+		if (!(s & BIT_COC_STAT_0_PLL_LOCKED))
+			return -EBUSY;
+		usleep_range(4000, 6000);
+	}
+	return -ETIMEDOUT;
+}
+
 static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
 {
+	int ret;
+
 	if (ctx->mode == mode)
 		return;
 
-	ctx->mode = mode;
-
 	switch (mode) {
 	case CM_MHL1:
 		sii8620_write_seq_static(ctx,
@@ -972,15 +1458,46 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
 				| BIT_DPD_OSC_EN,
 			REG_COC_INTR_MASK, 0
 		);
+		ctx->mode = mode;
 		break;
 	case CM_MHL3:
+		sii8620_write(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE);
+		ctx->mode = mode;
+		return;
+	case CM_ECBUS_S:
+		sii8620_emsc_enable(ctx);
 		sii8620_write_seq_static(ctx,
-			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
-			REG_COC_CTL0, 0x40,
-			REG_MHL_COC_CTL1, 0x07
+			REG_TTXSPINUMS, 4,
+			REG_TRXSPINUMS, 4,
+			REG_TTXHSICNUMS, 0x14,
+			REG_TRXHSICNUMS, 0x14,
+			REG_TTXTOTNUMS, 0x18,
+			REG_TRXTOTNUMS, 0x18,
+			REG_PWD_SRST, BIT_PWD_SRST_COC_DOC_RST
+				      | BIT_PWD_SRST_CBUS_RST_SW_EN,
+			REG_MHL_COC_CTL1, 0xbd,
+			REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN,
+			REG_COC_CTLB, 0x01,
+			REG_COC_CTL0, 0x5c,
+			REG_COC_CTL14, 0x03,
+			REG_COC_CTL15, 0x80,
+			REG_MHL_DP_CTL6, BIT_MHL_DP_CTL6_DP_TAP1_SGN
+					 | BIT_MHL_DP_CTL6_DP_TAP1_EN
+					 | BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN,
+			REG_MHL_DP_CTL8, 0x03
 		);
 		);
+		ret = sii8620_wait_for_fsm_state(ctx, 0x03);
+		sii8620_write_seq_static(ctx,
+			REG_COC_CTL14, 0x00,
+			REG_COC_CTL15, 0x80
+		);
+		if (!ret)
+			sii8620_write(ctx, REG_CBUS3_CNVT, 0x85);
+		else
+			sii8620_disconnect(ctx);
+		return;
 	case CM_DISCONNECTED:
+		ctx->mode = mode;
 		break;
 	default:
 		dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
@@ -1007,10 +1524,12 @@ static void sii8620_disconnect(struct sii8620 *ctx)
 {
 	sii8620_disable_gen2_write_burst(ctx);
 	sii8620_stop_video(ctx);
-	msleep(50);
+	msleep(100);
 	sii8620_cbus_reset(ctx);
 	sii8620_set_mode(ctx, CM_DISCONNECTED);
 	sii8620_write_seq_static(ctx,
+		REG_TX_ZONE_CTL1, 0,
+		REG_MHL_PLL_CTL0, 0x07,
 		REG_COC_CTL0, 0x40,
 		REG_CBUS3_CNVT, 0x84,
 		REG_COC_CTL14, 0x00,
@@ -1123,24 +1642,45 @@ static void sii8620_irq_disc(struct sii8620 *ctx)
 	sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
 }
 
+static void sii8620_read_burst(struct sii8620 *ctx)
+{
+	u8 buf[17];
+
+	sii8620_read_buf(ctx, REG_MDT_RCV_READ_PORT, buf, ARRAY_SIZE(buf));
+	sii8620_write(ctx, REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN |
+		      BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN |
+		      BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR);
+	sii8620_readb(ctx, REG_MDT_RFIFO_STAT);
+}
+
 static void sii8620_irq_g2wb(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
 
 	if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
-		dev_dbg(ctx->dev, "HAWB idle\n");
+		if (sii8620_is_mhl3(ctx))
+			sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
+				MHL_INT_RC_FEAT_COMPLETE);
+
+	if (stat & BIT_MDT_RFIFO_DATA_RDY)
+		sii8620_read_burst(ctx);
+
+	if (stat & BIT_MDT_XFIFO_EMPTY)
+		sii8620_write(ctx, REG_MDT_XMIT_CTRL, 0);
 
 	sii8620_write(ctx, REG_MDT_INT_0, stat);
 }
 
-static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+static void sii8620_status_dcap_ready(struct sii8620 *ctx)
 {
-	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
-		sii8620_set_mode(ctx, CM_MHL1);
-		sii8620_peer_specific_init(ctx);
-		sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
-			       | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
-	}
+	enum sii8620_mode mode;
+
+	mode = ctx->stat[MHL_DST_VERSION] >= 0x30 ? CM_MHL3 : CM_MHL1;
+	if (mode > ctx->mode)
+		sii8620_set_mode(ctx, mode);
+	sii8620_peer_specific_init(ctx);
+	sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+		      | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
 }
 
 static void sii8620_status_changed_path(struct sii8620 *ctx)
@@ -1149,7 +1689,9 @@ static void sii8620_status_changed_path(struct sii8620 *ctx)
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL
 				      | MHL_DST_LM_PATH_ENABLED);
-		sii8620_mt_read_devcap(ctx, false);
+		if (!sii8620_is_mhl3(ctx))
+			sii8620_mt_read_devcap(ctx, false);
+		sii8620_mt_set_cont(ctx, sii8620_sink_detected);
 	} else {
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL);
@@ -1166,19 +1708,75 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
 	sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
 	sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-	if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
-		sii8620_status_changed_dcap(ctx);
+	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+		sii8620_status_dcap_ready(ctx);
 
 	if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
 		sii8620_status_changed_path(ctx);
 }
 
+static void sii8620_ecbus_up(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
+	sii8620_set_mode(ctx, CM_ECBUS_S);
+}
+
+static void sii8620_got_ecbus_speed(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
+	sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+			      MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+	sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+	sii8620_mt_set_cont(ctx, sii8620_ecbus_up);
+}
+
+static void sii8620_mhl_burst_emsc_support_set(struct mhl_burst_emsc_support *d,
+	enum mhl_burst_id id)
+{
+	sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_EMSC_SUPPORT);
+	d->num_entries = 1;
+	d->burst_id[0] = cpu_to_be16(id);
+}
+
+static void sii8620_send_features(struct sii8620 *ctx)
+{
+	u8 buf[16];
+
+	sii8620_write(ctx, REG_MDT_XMIT_CTRL, BIT_MDT_XMIT_CTRL_EN
+		| BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN);
+	sii8620_mhl_burst_emsc_support_set((void *)buf,
+		MHL_BURST_ID_HID_PAYLOAD);
+	sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
+}
+
 static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
 {
 	u8 ints[MHL_INT_SIZE];
 
 	sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
 	sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_DCAP_CHG) {
+		switch (ctx->mode) {
+		case CM_MHL3:
+			sii8620_mt_read_xdevcap_reg(ctx, MHL_XDC_ECBUS_SPEEDS);
+			sii8620_mt_set_cont(ctx, sii8620_got_ecbus_speed);
+			break;
+		case CM_ECBUS_S:
+			sii8620_mt_read_devcap(ctx, true);
+			break;
+		default:
+			break;
+		}
+	}
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
+		sii8620_send_features(ctx);
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
+		sii8620_edid_read(ctx, 0);
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1261,6 +1859,19 @@ static void sii8620_irq_coc(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_COC_INTR);
 
+	if (stat & BIT_COC_CALIBRATION_DONE) {
+		u8 cstat = sii8620_readb(ctx, REG_COC_STAT_0);
+
+		cstat &= BIT_COC_STAT_0_PLL_LOCKED | MSK_COC_STAT_0_FSM_STATE;
+		if (cstat == (BIT_COC_STAT_0_PLL_LOCKED | 0x02)) {
+			sii8620_write_seq_static(ctx,
+				REG_COC_CTLB, 0,
+				REG_TRXINTMH, BIT_TDM_INTR_SYNC_DATA
+					      | BIT_TDM_INTR_SYNC_WAIT
+			);
+		}
+	}
+
 	sii8620_write(ctx, REG_COC_INTR, stat);
 }
 
@@ -1289,17 +1900,6 @@ static void sii8620_scdt_high(struct sii8620 *ctx)
 	);
 }
 
-static void sii8620_scdt_low(struct sii8620 *ctx)
-{
-	sii8620_write(ctx, REG_TMDS_CSTAT_P3,
-		      BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
-		      BIT_TMDS_CSTAT_P3_CLR_AVI);
-
-	sii8620_stop_video(ctx);
-
-	sii8620_write(ctx, REG_INTR8_MASK, 0);
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1309,8 +1909,6 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
 
 		if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
 			sii8620_scdt_high(ctx);
-		else
-			sii8620_scdt_low(ctx);
 	}
 
 	sii8620_write(ctx, REG_INTR5, stat);
@@ -1351,6 +1949,65 @@ static void sii8620_irq_infr(struct sii8620 *ctx)
 		sii8620_start_video(ctx);
 }
 
+static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
+{
+	if (ret < 0)
+		return;
+
+	sii8620_mt_read_devcap(ctx, false);
+}
+
+static void sii8620_irq_tdm(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_TRXINTH);
+	u8 tdm = sii8620_readb(ctx, REG_TRXSTA2);
+
+	if ((tdm & MSK_TDM_SYNCHRONIZED) == VAL_TDM_SYNCHRONIZED) {
+		ctx->mode = CM_ECBUS_S;
+		ctx->burst.rx_ack = 0;
+		ctx->burst.r_size = SII8620_BURST_BUF_LEN;
+		sii8620_burst_tx_rbuf_info(ctx, SII8620_BURST_BUF_LEN);
+		sii8620_mt_read_devcap(ctx, true);
+		sii8620_mt_set_cont(ctx, sii8620_got_xdevcap);
+	} else {
+		sii8620_write_seq_static(ctx,
+			REG_MHL_PLL_CTL2, 0,
+			REG_MHL_PLL_CTL2, BIT_MHL_PLL_CTL2_CLKDETECT_EN
+		);
+	}
+
+	sii8620_write(ctx, REG_TRXINTH, stat);
+}
+
+static void sii8620_irq_block(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_EMSCINTR);
+
+	if (stat & BIT_EMSCINTR_SPI_DVLD) {
+		u8 bstat = sii8620_readb(ctx, REG_SPIBURSTSTAT);
+
+		if (bstat & BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE)
+			sii8620_burst_receive(ctx);
+	}
+
+	sii8620_write(ctx, REG_EMSCINTR, stat);
+}
+
+static void sii8620_irq_ddc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR3);
+
+	if (stat & BIT_DDC_CMD_DONE) {
+		sii8620_write(ctx, REG_INTR3_MASK, 0);
+		if (sii8620_is_mhl3(ctx))
+			sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
+					   MHL_INT_RC_FEAT_REQ);
+		else
+			sii8620_edid_read(ctx, 0);
+	}
+	sii8620_write(ctx, REG_INTR3, stat);
+}
+
 /* endian agnostic, non-volatile version of test_bit */
 static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
 {
@@ -1366,9 +2023,12 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
 		{ BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
 		{ BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
 		{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+		{ BIT_FAST_INTR_STAT_TDM, sii8620_irq_tdm },
 		{ BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
 		{ BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+		{ BIT_FAST_INTR_STAT_BLOCK, sii8620_irq_block },
 		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+		{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
 		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
 		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
 	};
@@ -1383,7 +2043,9 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
 		if (sii8620_test_bit(irq_vec[i].bit, stats))
 			irq_vec[i].handler(ctx);
 
+	sii8620_burst_rx_all(ctx);
 	sii8620_mt_work(ctx);
+	sii8620_burst_send(ctx);
 
 	ret = sii8620_clear_error(ctx);
 	if (ret) {
@@ -1450,22 +2112,41 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
-	bool ret = false;
-	int max_clock = 74250;
+	int max_lclk;
+	bool ret = true;
 
 	mutex_lock(&ctx->lock);
 
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		goto out;
-
-	if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
-		max_clock = 300000;
-
-	ret = mode->clock <= max_clock;
-
-out:
+	max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
+	if (max_lclk > 3 * adjusted_mode->clock) {
+		ctx->use_packed_pixel = 0;
+		goto end;
+	}
+	if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
+	    max_lclk > 2 * adjusted_mode->clock) {
+		ctx->use_packed_pixel = 1;
+		goto end;
+	}
+	ret = false;
+end:
+	if (ret) {
+		u8 vic = drm_match_cea_mode(adjusted_mode);
+
+		if (!vic) {
+			union hdmi_infoframe frm;
+			u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
+
+			drm_hdmi_vendor_infoframe_from_display_mode(
+				&frm.vendor.hdmi, adjusted_mode);
+			vic = frm.vendor.hdmi.vic;
+			if (vic >= ARRAY_SIZE(mhl_vic))
+				vic = 0;
+			vic = mhl_vic[vic];
+		}
+		ctx->video_code = vic;
+		ctx->pixel_clock = adjusted_mode->clock;
+	}
 	mutex_unlock(&ctx->lock);
-
 	return ret;
 }
 

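Note: the reworked sii8620_mode_fixup() above reduces to a link-clock budget check: 24bpp RGB needs three times the pixel clock on the MHL link, while packed-pixel YCbCr 4:2:2 needs only twice. The following is a minimal illustrative sketch of that decision, not part of the patch; the helper name and parameters are hypothetical, and the actual MHL1_MAX_LCLK/MHL3_MAX_LCLK values are defined elsewhere in the driver.

/* Sketch: decide whether a mode fits the MHL link and whether packed pixel
 * mode is needed (all clocks in kHz, as in drm_display_mode.clock). */
static bool mhl_link_can_carry(int max_lclk, int pixel_clock,
			       bool sink_supports_packed, bool *use_packed)
{
	if (max_lclk > 3 * pixel_clock) {
		*use_packed = false;		/* full 24bpp RGB fits */
		return true;
	}
	if (sink_supports_packed && max_lclk > 2 * pixel_clock) {
		*use_packed = true;		/* fall back to 16bpp packed pixel */
		return true;
	}
	return false;				/* mode exceeds the link budget */
}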
+ 34 - 16
drivers/gpu/drm/bridge/sil-sii8620.h

@@ -353,7 +353,7 @@
 #define REG_TTXNUMB				0x0116
 #define MSK_TTXNUMB_TTX_AFFCTRL_3_0		0xf0
 #define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT	BIT(3)
-#define MSK_TTXNUMB_TTX_NUMBPS_2_0		0x07
+#define MSK_TTXNUMB_TTX_NUMBPS			0x07
 
 /* TDM TX NUMSPISYM, default value: 0x04 */
 #define REG_TTXSPINUMS				0x0117
@@ -403,12 +403,16 @@
 
 /* TDM RX Status 2nd, default value: 0x00 */
 #define REG_TRXSTA2				0x015c
+#define MSK_TDM_SYNCHRONIZED			0xc0
+#define VAL_TDM_SYNCHRONIZED			0x80
 
 /* TDM RX INT Low, default value: 0x00 */
 #define REG_TRXINTL				0x0163
 
 /* TDM RX INT High, default value: 0x00 */
 #define REG_TRXINTH				0x0164
+#define BIT_TDM_INTR_SYNC_DATA			BIT(0)
+#define BIT_TDM_INTR_SYNC_WAIT			BIT(1)
 
 /* TDM RX INTMASK High, default value: 0x00 */
 #define REG_TRXINTMH				0x0166
@@ -429,12 +433,14 @@
 
 /* HSIC Keeper, default value: 0x00 */
 #define REG_KEEPER				0x0181
-#define MSK_KEEPER_KEEPER_MODE_1_0		0x03
+#define MSK_KEEPER_MODE				0x03
+#define VAL_KEEPER_MODE_HOST			0
+#define VAL_KEEPER_MODE_DEVICE			2
 
 /* HSIC Flow Control General, default value: 0x02 */
 #define REG_FCGC				0x0183
-#define BIT_FCGC_HSIC_FC_HOSTMODE		BIT(1)
-#define BIT_FCGC_HSIC_FC_ENABLE			BIT(0)
+#define BIT_FCGC_HSIC_HOSTMODE			BIT(1)
+#define BIT_FCGC_HSIC_ENABLE			BIT(0)
 
 /* HSIC Flow Control CTR13, default value: 0xfc */
 #define REG_FCCTR13				0x0191
@@ -841,6 +847,8 @@
 #define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL	0xf0
 #define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL		0x0f
 
+#define REG_MHL_DP_CTL8				0x0352
+
 /* Tx Zone Ctl1, default value: 0x00 */
 #define REG_TX_ZONE_CTL1			0x0361
 #define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE	0x08
@@ -1078,16 +1086,26 @@
 
 /* TPI Info Frame Select, default value: 0x00 */
 #define REG_TPI_INFO_FSEL			0x06bf
-#define BIT_TPI_INFO_FSEL_TPI_INFO_EN		BIT(7)
-#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT		BIT(6)
-#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG	BIT(5)
-#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL		0x07
+#define BIT_TPI_INFO_FSEL_EN			BIT(7)
+#define BIT_TPI_INFO_FSEL_RPT			BIT(6)
+#define BIT_TPI_INFO_FSEL_READ_FLAG		BIT(5)
+#define MSK_TPI_INFO_FSEL_PKT			0x07
+#define VAL_TPI_INFO_FSEL_AVI			0x00
+#define VAL_TPI_INFO_FSEL_SPD			0x01
+#define VAL_TPI_INFO_FSEL_AUD			0x02
+#define VAL_TPI_INFO_FSEL_MPG			0x03
+#define VAL_TPI_INFO_FSEL_GEN			0x04
+#define VAL_TPI_INFO_FSEL_GEN2			0x05
+#define VAL_TPI_INFO_FSEL_VSI			0x06
 
 /* TPI Info Byte #0, default value: 0x00 */
 #define REG_TPI_INFO_B0				0x06c0
 
 /* CoC Status, default value: 0x00 */
 #define REG_COC_STAT_0				0x0700
+#define BIT_COC_STAT_0_PLL_LOCKED		BIT(7)
+#define MSK_COC_STAT_0_FSM_STATE		0x0f
+
 #define REG_COC_STAT_1				0x0701
 #define REG_COC_STAT_2				0x0702
 #define REG_COC_STAT_3				0x0703
@@ -1282,14 +1300,14 @@
 
 /* MDT Transmit Control, default value: 0x70 */
 #define REG_MDT_XMIT_CTRL			0x0588
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN		BIT(7)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN	BIT(6)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID	BIT(4)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
-#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT	BIT(2)
-#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL	BIT(1)
-#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR	BIT(0)
+#define BIT_MDT_XMIT_CTRL_EN			BIT(7)
+#define BIT_MDT_XMIT_CTRL_CMD_MERGE_EN		BIT(6)
+#define BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN	BIT(5)
+#define BIT_MDT_XMIT_CTRL_FIXED_AID		BIT(4)
+#define BIT_MDT_XMIT_CTRL_SINGLE_RUN_EN		BIT(3)
+#define BIT_MDT_XMIT_CTRL_CLR_ABORT_WAIT	BIT(2)
+#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_ALL		BIT(1)
+#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_CUR		BIT(0)
 
 /* MDT Receive WRITE Port, default value: 0x00 */
 #define REG_MDT_XMIT_WRITE_PORT			0x0589

+ 1 - 1
drivers/gpu/drm/cirrus/cirrus_fbdev.c

@@ -289,7 +289,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 			      &cirrus_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
-				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+				 CIRRUSFB_CONN_LIMIT);
 	if (ret)
 		return ret;
 

+ 2 - 3
drivers/gpu/drm/drm_atomic.c

@@ -307,9 +307,8 @@ static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  * @state: the CRTC whose incoming state to update
  * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
  *
- * Set a mode (originating from the kernel) on the desired CRTC state. Does
- * not change any other state properties, including enable, active, or
- * mode_changed.
+ * Set a mode (originating from the kernel) on the desired CRTC state and update
+ * the enable property.
  *
  * RETURNS:
  * Zero on success, error code on failure. Cannot return -EDEADLK.

+ 1 - 1
drivers/gpu/drm/drm_atomic_helper.c

@@ -369,7 +369,7 @@ mode_fixup(struct drm_atomic_state *state)
 	struct drm_connector *connector;
 	struct drm_connector_state *conn_state;
 	int i;
-	bool ret;
+	int ret;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->mode_changed &&

+ 24 - 0
drivers/gpu/drm/drm_color_mgmt.c

@@ -87,6 +87,30 @@
  * "GAMMA_LUT" property above.
  */
 
+/**
+ * drm_color_lut_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
+{
+	uint32_t val = user_input;
+	uint32_t max = 0xffff >> (16 - bit_precision);
+
+	/* Round only if we're not using full precision. */
+	if (bit_precision < 16) {
+		val += 1UL << (16 - bit_precision - 1);
+		val >>= 16 - bit_precision;
+	}
+
+	return clamp_val(val, 0, max);
+}
+EXPORT_SYMBOL(drm_color_lut_extract);
+
 /**
  * drm_crtc_enable_color_mgmt - enable color management properties
  * @crtc: DRM CRTC

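Usage note (illustrative, not part of the diff): drm_color_lut_extract() is meant to be called per channel when a driver loads a &drm_color_lut entry into hardware with fewer than 16 bits of precision. A sketch for a hypothetical 10-bit-per-channel LUT register layout:

/* Sketch: pack one userspace LUT entry for hypothetical 10-bit hardware. */
static u32 lut_entry_to_hw(const struct drm_color_lut *e)
{
	return drm_color_lut_extract(e->red, 10) << 20 |
	       drm_color_lut_extract(e->green, 10) << 10 |
	       drm_color_lut_extract(e->blue, 10);
}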
+ 3 - 0
drivers/gpu/drm/drm_crtc_internal.h

@@ -207,3 +207,6 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
 			   void *data, struct drm_file *file_priv);
 int drm_mode_page_flip_ioctl(struct drm_device *dev,
 			     void *data, struct drm_file *file_priv);
+
+/* drm_edid.c */
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode);

+ 45 - 20
drivers/gpu/drm/drm_drv.c

@@ -465,7 +465,10 @@ static void drm_fs_inode_free(struct inode *inode)
  * that do embed &struct drm_device it must be placed first in the overall
  * structure, and the overall structure must be allocated using kmalloc(): The
  * drm core's release function unconditionally calls kfree() on the @dev pointer
- * when the final reference is released.
+ * when the final reference is released. To override this behaviour, and so
+ * allow embedding of the drm_device inside the driver's device struct at an
+ * arbitrary offset, you must supply a &drm_driver.release callback and control
+ * the finalization explicitly.
  *
  * RETURNS:
  * 0 on success, or error code on failure.
@@ -552,6 +555,41 @@ err_free:
 }
 EXPORT_SYMBOL(drm_dev_init);
 
+/**
+ * drm_dev_fini - Finalize a dead DRM device
+ * @dev: DRM device
+ *
+ * Finalize a dead DRM device. This is the converse to drm_dev_init() and
+ * frees up all data allocated by it. All driver private data should be
+ * finalized first. Note that this function does not free the @dev, that is
+ * left to the caller.
+ *
+ * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
+ * from a &drm_driver.release callback.
+ */
+void drm_dev_fini(struct drm_device *dev)
+{
+	drm_vblank_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_GEM))
+		drm_gem_destroy(dev);
+
+	drm_legacy_ctxbitmap_cleanup(dev);
+	drm_ht_remove(&dev->map_hash);
+	drm_fs_inode_free(dev->anon_inode);
+
+	drm_minor_free(dev, DRM_MINOR_PRIMARY);
+	drm_minor_free(dev, DRM_MINOR_RENDER);
+	drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+	mutex_destroy(&dev->master_mutex);
+	mutex_destroy(&dev->ctxlist_mutex);
+	mutex_destroy(&dev->filelist_mutex);
+	mutex_destroy(&dev->struct_mutex);
+	kfree(dev->unique);
+}
+EXPORT_SYMBOL(drm_dev_fini);
+
 /**
 /**
  * drm_dev_alloc - Allocate new DRM device
  * @driver: DRM driver to allocate device for
@@ -598,25 +636,12 @@ static void drm_dev_release(struct kref *ref)
 {
 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
 
-	drm_vblank_cleanup(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_GEM))
-		drm_gem_destroy(dev);
-
-	drm_legacy_ctxbitmap_cleanup(dev);
-	drm_ht_remove(&dev->map_hash);
-	drm_fs_inode_free(dev->anon_inode);
-
-	drm_minor_free(dev, DRM_MINOR_PRIMARY);
-	drm_minor_free(dev, DRM_MINOR_RENDER);
-	drm_minor_free(dev, DRM_MINOR_CONTROL);
-
-	mutex_destroy(&dev->master_mutex);
-	mutex_destroy(&dev->ctxlist_mutex);
-	mutex_destroy(&dev->filelist_mutex);
-	mutex_destroy(&dev->struct_mutex);
-	kfree(dev->unique);
-	kfree(dev);
+	if (dev->driver->release) {
+		dev->driver->release(dev);
+	} else {
+		drm_dev_fini(dev);
+		kfree(dev);
+	}
 }
 
 /**

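For reference, the new kernel-doc above spells out the intended pattern: a driver embedding &struct drm_device at a non-zero offset supplies a &drm_driver.release callback that calls drm_dev_fini() and then frees its own structure. A minimal sketch, assuming a hypothetical "foo_device" driver struct:

/* Sketch of a driver release callback using drm_dev_fini(); names are
 * placeholders, error paths omitted. */
static void foo_release(struct drm_device *drm)
{
	struct foo_device *foo = container_of(drm, struct foo_device, drm);

	drm_dev_fini(drm);	/* converse of drm_dev_init(), does not kfree */
	kfree(foo);
}

static struct drm_driver foo_driver = {
	.release = foo_release,
	/* ... other driver ops ... */
};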
+ 5 - 3
drivers/gpu/drm/drm_edid.c

@@ -38,6 +38,8 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_displayid.h>
 
+#include "drm_crtc_internal.h"
+
 #define version_greater(edid, maj, min) \
 	(((edid)->version > (maj)) || \
 	 ((edid)->version == (maj) && (edid)->revision > (min)))
@@ -2153,7 +2155,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 /* fix up 1366x768 mode from 1368x768;
  * GFT/CVT can't express 1366 width which isn't dividable by 8
  */
-static void fixup_mode_1366x768(struct drm_display_mode *mode)
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
 {
 	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
 		mode->hdisplay = 1366;
@@ -2177,7 +2179,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 		if (!newmode)
 			return modes;
 
-		fixup_mode_1366x768(newmode);
+		drm_mode_fixup_1366x768(newmode);
 		if (!mode_in_range(newmode, edid, timing) ||
 		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
@@ -2206,7 +2208,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 		if (!newmode)
 			return modes;
 
-		fixup_mode_1366x768(newmode);
+		drm_mode_fixup_1366x768(newmode);
 		if (!mode_in_range(newmode, edid, timing) ||
 		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);

+ 7 - 8
drivers/gpu/drm/drm_fb_cma_helper.c

@@ -489,15 +489,14 @@ static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
  * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device
- * @num_crtc: Number of CRTCs
  * @max_conn_count: Maximum number of connectors
  * @funcs: fb helper functions, in particular a custom dirty() callback
  *
  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
  */
 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs)
+	unsigned int preferred_bpp, unsigned int max_conn_count,
+	const struct drm_framebuffer_funcs *funcs)
 {
 	struct drm_fbdev_cma *fbdev_cma;
 	struct drm_fb_helper *helper;
@@ -514,7 +513,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
 
 	drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+	ret = drm_fb_helper_init(dev, helper, max_conn_count);
 	if (ret < 0) {
 		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
 		goto err_free;
@@ -554,11 +553,11 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
  */
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count)
+	unsigned int preferred_bpp, unsigned int max_conn_count)
 {
-	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
-				max_conn_count, &drm_fb_cma_funcs);
+	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
+					     max_conn_count,
+					     &drm_fb_cma_funcs);
 }
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 

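Caller impact, shown as a hedged sketch (the "priv" pointer and bpp value are placeholders): the num_crtc argument is gone, so CMA fbdev users now pass only the preferred bpp and the connector limit.

/* Sketch of a caller updated for the new drm_fbdev_cma_init() signature. */
priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector);
if (IS_ERR(priv->fbdev))
	return PTR_ERR(priv->fbdev);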
+ 8 - 5
drivers/gpu/drm/drm_fb_helper.c

@@ -712,7 +712,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
  * drm_fb_helper_init - initialize a drm_fb_helper structure
  * @dev: drm device
  * @fb_helper: driver-allocated fbdev helper structure to initialize
- * @crtc_count: maximum number of crtcs to support in this fbdev emulation
  * @max_conn_count: max connector count
  *
  * This allocates the structures for the fbdev helper with the given limits.
@@ -727,9 +726,10 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
  */
 int drm_fb_helper_init(struct drm_device *dev,
 		       struct drm_fb_helper *fb_helper,
-		       int crtc_count, int max_conn_count)
+		       int max_conn_count)
 {
 	struct drm_crtc *crtc;
+	struct drm_mode_config *config = &dev->mode_config;
 	int i;
 
 	if (!drm_fbdev_emulation)
@@ -738,11 +738,11 @@ int drm_fb_helper_init(struct drm_device *dev,
 	if (!max_conn_count)
 		return -EINVAL;
 
-	fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+	fb_helper->crtc_info = kcalloc(config->num_crtc, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
 	if (!fb_helper->crtc_info)
 		return -ENOMEM;
 
-	fb_helper->crtc_count = crtc_count;
+	fb_helper->crtc_count = config->num_crtc;
 	fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
 	if (!fb_helper->connector_info) {
 		kfree(fb_helper->crtc_info);
@@ -751,7 +751,7 @@ int drm_fb_helper_init(struct drm_device *dev,
 	fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
 	fb_helper->connector_count = 0;
 
-	for (i = 0; i < crtc_count; i++) {
+	for (i = 0; i < fb_helper->crtc_count; i++) {
 		fb_helper->crtc_info[i].mode_set.connectors =
 			kcalloc(max_conn_count,
 				sizeof(struct drm_connector *),
@@ -860,6 +860,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 	if (!drm_fbdev_emulation)
 		return;
 
+	cancel_work_sync(&fb_helper->resume_work);
+	cancel_work_sync(&fb_helper->dirty_work);
+
 	mutex_lock(&kernel_fb_helper_lock);
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);

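As the hunks above show, drm_fb_helper_init() now derives the CRTC count from dev->mode_config.num_crtc, so it has to be called after all CRTCs are registered and callers only pass the connector limit. A hedged caller sketch ("foo" names are placeholders):

/* Sketch: fbdev emulation setup with the crtc_count argument dropped. */
drm_fb_helper_prepare(dev, &foo->fb_helper, &foo_fb_helper_funcs);
ret = drm_fb_helper_init(dev, &foo->fb_helper, dev->mode_config.num_connector);
if (ret)
	return ret;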
+ 272 - 216
drivers/gpu/drm/drm_mm.c

@@ -97,14 +97,6 @@
  * locking would be fully redundant.
  */
 
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-						u64 size,
-						u64 alignment,
-						unsigned long color,
-						u64 start,
-						u64 end,
-						enum drm_mm_search_flags flags);
-
 #ifdef CONFIG_DRM_DEBUG_MM
 #include <linux/stackdepot.h>
 
@@ -226,69 +218,151 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 			    &drm_mm_interval_tree_augment);
 }
 
-static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
-				 struct drm_mm_node *node,
-				 u64 size, u64 alignment,
-				 unsigned long color,
-				 u64 range_start, u64 range_end,
-				 enum drm_mm_allocator_flags flags)
+#define RB_INSERT(root, member, expr) do { \
+	struct rb_node **link = &root.rb_node, *rb = NULL; \
+	u64 x = expr(node); \
+	while (*link) { \
+		rb = *link; \
+		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
+			link = &rb->rb_left; \
+		else \
+			link = &rb->rb_right; \
+	} \
+	rb_link_node(&node->member, rb, link); \
+	rb_insert_color(&node->member, &root); \
+} while (0)
+
+#define HOLE_SIZE(NODE) ((NODE)->hole_size)
+#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+
+static void add_hole(struct drm_mm_node *node)
 {
-	struct drm_mm *mm = hole_node->mm;
-	u64 hole_start = drm_mm_hole_node_start(hole_node);
-	u64 hole_end = drm_mm_hole_node_end(hole_node);
-	u64 adj_start = hole_start;
-	u64 adj_end = hole_end;
+	struct drm_mm *mm = node->mm;
 
-	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);
+	node->hole_size =
+		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
-	adj_start = max(adj_start, range_start);
-	adj_end = min(adj_end, range_end);
+	list_add(&node->hole_stack, &mm->hole_stack);
+}
 
-	if (flags & DRM_MM_CREATE_TOP)
-		adj_start = adj_end - size;
+static void rm_hole(struct drm_mm_node *node)
+{
+	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-	if (alignment) {
-		u64 rem;
+	list_del(&node->hole_stack);
+	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+	node->hole_size = 0;
 
-		div64_u64_rem(adj_start, alignment, &rem);
-		if (rem) {
-			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= rem;
-			else
-				adj_start += alignment - rem;
+	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
+}
+
+static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
+{
+	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
+}
+
+static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
+{
+	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
+}
+
+static inline u64 rb_hole_size(struct rb_node *rb)
+{
+	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
+{
+	struct rb_node *best = NULL;
+	struct rb_node **link = &mm->holes_size.rb_node;
+
+	while (*link) {
+		struct rb_node *rb = *link;
+
+		if (size <= rb_hole_size(rb)) {
+			link = &rb->rb_left;
+			best = rb;
+		} else {
+			link = &rb->rb_right;
 		}
 	}
 
-	if (adj_start == hole_start) {
-		hole_node->hole_follows = 0;
-		list_del(&hole_node->hole_stack);
+	return rb_hole_size_to_node(best);
+}
+
+static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
+{
+	struct drm_mm_node *node = NULL;
+	struct rb_node **link = &mm->holes_addr.rb_node;
+
+	while (*link) {
+		u64 hole_start;
+
+		node = rb_hole_addr_to_node(*link);
+		hole_start = __drm_mm_hole_node_start(node);
+
+		if (addr < hole_start)
+			link = &node->rb_hole_addr.rb_left;
+		else if (addr > hole_start + node->hole_size)
+			link = &node->rb_hole_addr.rb_right;
+		else
+			break;
 	}
 
-	node->start = adj_start;
-	node->size = size;
-	node->mm = mm;
-	node->color = color;
-	node->allocated = 1;
+	return node;
+}
 
-	list_add(&node->node_list, &hole_node->node_list);
+static struct drm_mm_node *
+first_hole(struct drm_mm *mm,
+	   u64 start, u64 end, u64 size,
+	   enum drm_mm_insert_mode mode)
+{
+	if (RB_EMPTY_ROOT(&mm->holes_size))
+		return NULL;
 
-	drm_mm_interval_tree_add_node(hole_node, node);
+	switch (mode) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return best_hole(mm, size);
 
-	DRM_MM_BUG_ON(node->start < range_start);
-	DRM_MM_BUG_ON(node->start < adj_start);
-	DRM_MM_BUG_ON(node->start + node->size > adj_end);
-	DRM_MM_BUG_ON(node->start + node->size > range_end);
+	case DRM_MM_INSERT_LOW:
+		return find_hole(mm, start);
 
-	node->hole_follows = 0;
-	if (__drm_mm_hole_node_start(node) < hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
+	case DRM_MM_INSERT_HIGH:
+		return find_hole(mm, end);
+
+	case DRM_MM_INSERT_EVICT:
+		return list_first_entry_or_null(&mm->hole_stack,
+						struct drm_mm_node,
+						hole_stack);
 	}
+}
 
-	save_stack(node);
+static struct drm_mm_node *
+next_hole(struct drm_mm *mm,
+	  struct drm_mm_node *node,
+	  enum drm_mm_insert_mode mode)
+{
+	switch (mode) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+
+	case DRM_MM_INSERT_LOW:
+		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+
+	case DRM_MM_INSERT_HIGH:
+		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+
+	case DRM_MM_INSERT_EVICT:
+		node = list_next_entry(node, hole_stack);
+		return &node->hole_stack == &mm->hole_stack ? NULL : node;
+	}
 }
 }
 
 
 /**
 /**
@@ -317,21 +391,12 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return -ENOSPC;
 		return -ENOSPC;
 
 
 	/* Find the relevant hole to add our node to */
 	/* Find the relevant hole to add our node to */
-	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
-					       node->start, ~(u64)0);
-	if (hole) {
-		if (hole->start < end)
-			return -ENOSPC;
-	} else {
-		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
-	}
-
-	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
-	if (!drm_mm_hole_follows(hole))
+	hole = find_hole(mm, node->start);
+	if (!hole)
 		return -ENOSPC;
 		return -ENOSPC;
 
 
 	adj_start = hole_start = __drm_mm_hole_node_start(hole);
 	adj_start = hole_start = __drm_mm_hole_node_start(hole);
-	adj_end = hole_end = __drm_mm_hole_node_end(hole);
+	adj_end = hole_end = hole_start + hole->hole_size;
 
 
 	if (mm->color_adjust)
 	if (mm->color_adjust)
 		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
 		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
@@ -340,70 +405,130 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return -ENOSPC;
 		return -ENOSPC;
 
 
 	node->mm = mm;
 	node->mm = mm;
-	node->allocated = 1;
 
 
 	list_add(&node->node_list, &hole->node_list);
 	list_add(&node->node_list, &hole->node_list);
-
 	drm_mm_interval_tree_add_node(hole, node);
 	drm_mm_interval_tree_add_node(hole, node);
+	node->allocated = true;
+	node->hole_size = 0;
 
 
-	if (node->start == hole_start) {
-		hole->hole_follows = 0;
-		list_del(&hole->hole_stack);
-	}
-
-	node->hole_follows = 0;
-	if (end != hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
-	}
+	rm_hole(hole);
+	if (node->start > hole_start)
+		add_hole(hole);
+	if (end < hole_end)
+		add_hole(node);
 
 
 	save_stack(node);
 	save_stack(node);
-
 	return 0;
 	return 0;
 }
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 EXPORT_SYMBOL(drm_mm_reserve_node);
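
For context, a minimal caller-side sketch of the reservation path, assuming an already-initialised allocator; the fixed range and the helper name are hypothetical, not part of this patch:

static int reserve_stolen(struct drm_mm *mm, struct drm_mm_node *stolen)
{
	/* claim a fixed, caller-chosen range; -ENOSPC if it overlaps */
	stolen->start = 0x100000;
	stolen->size = SZ_1M;
	stolen->color = 0;

	return drm_mm_reserve_node(mm, stolen);
}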
 
 
 /**
 /**
- * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
  * @mm: drm_mm to allocate from
  * @node: preallocate node to insert
  * @node: preallocate node to insert
  * @size: size of the allocation
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
  * @color: opaque tag value to use for this node
- * @start: start of the allowed range for this node
- * @end: end of the allowed range for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
+ * @range_start: start of the allowed range for this node
+ * @range_end: end of the allowed range for this node
+ * @mode: fine-tune the allocation search and placement
  *
  *
  * The preallocated @node must be cleared to 0.
  * The preallocated @node must be cleared to 0.
  *
  *
  * Returns:
  * Returns:
  * 0 on success, -ENOSPC if there's no suitable hole.
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
  */
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-					u64 size, u64 alignment,
-					unsigned long color,
-					u64 start, u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags)
+int drm_mm_insert_node_in_range(struct drm_mm * const mm,
+				struct drm_mm_node * const node,
+				u64 size, u64 alignment,
+				unsigned long color,
+				u64 range_start, u64 range_end,
+				enum drm_mm_insert_mode mode)
 {
 {
-	struct drm_mm_node *hole_node;
+	struct drm_mm_node *hole;
+	u64 remainder_mask;
 
 
-	if (WARN_ON(size == 0))
-		return -EINVAL;
+	DRM_MM_BUG_ON(range_start >= range_end);
 
 
-	hole_node = drm_mm_search_free_in_range_generic(mm,
-							size, alignment, color,
-							start, end, sflags);
-	if (!hole_node)
+	if (unlikely(size == 0 || range_end - range_start < size))
 		return -ENOSPC;
 		return -ENOSPC;
 
 
-	drm_mm_insert_helper(hole_node, node,
-			     size, alignment, color,
-			     start, end, aflags);
-	return 0;
+	if (alignment <= 1)
+		alignment = 0;
+
+	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
+	     hole = next_hole(mm, hole, mode)) {
+		u64 hole_start = __drm_mm_hole_node_start(hole);
+		u64 hole_end = hole_start + hole->hole_size;
+		u64 adj_start, adj_end;
+		u64 col_start, col_end;
+
+		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
+			break;
+
+		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
+			break;
+
+		col_start = hole_start;
+		col_end = hole_end;
+		if (mm->color_adjust)
+			mm->color_adjust(hole, color, &col_start, &col_end);
+
+		adj_start = max(col_start, range_start);
+		adj_end = min(col_end, range_end);
+
+		if (adj_end <= adj_start || adj_end - adj_start < size)
+			continue;
+
+		if (mode == DRM_MM_INSERT_HIGH)
+			adj_start = adj_end - size;
+
+		if (alignment) {
+			u64 rem;
+
+			if (likely(remainder_mask))
+				rem = adj_start & remainder_mask;
+			else
+				div64_u64_rem(adj_start, alignment, &rem);
+			if (rem) {
+				adj_start -= rem;
+				if (mode != DRM_MM_INSERT_HIGH)
+					adj_start += alignment;
+
+				if (adj_start < max(col_start, range_start) ||
+				    min(col_end, range_end) - adj_start < size)
+					continue;
+
+				if (adj_end <= adj_start ||
+				    adj_end - adj_start < size)
+					continue;
+			}
+		}
+
+		node->mm = mm;
+		node->size = size;
+		node->start = adj_start;
+		node->color = color;
+		node->hole_size = 0;
+
+		list_add(&node->node_list, &hole->node_list);
+		drm_mm_interval_tree_add_node(hole, node);
+		node->allocated = true;
+
+		rm_hole(hole);
+		if (adj_start > hole_start)
+			add_hole(hole);
+		if (adj_start + size < hole_end)
+			add_hole(node);
+
+		save_stack(node);
+		return 0;
+	}
+
+	return -ENOSPC;
 }
 }
-EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
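
A rough usage sketch of the reworked entry point, with hypothetical sizes, range and helper name:

static int grab_low_block(struct drm_mm *mm, struct drm_mm_node *node)
{
	/* 64KiB, 4KiB-aligned, placed as low as possible within [0, 4GiB) */
	return drm_mm_insert_node_in_range(mm, node, SZ_64K, SZ_4K, 0,
					   0, SZ_4G, DRM_MM_INSERT_LOW);
}

The other modes select the search orders wired up in first_hole()/next_hole() above: BEST walks the hole-size tree, LOW/HIGH walk the hole-address tree upwards/downwards, EVICT follows the LRU hole_stack.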
 
 
 /**
 /**
  * drm_mm_remove_node - Remove a memory node from the allocator.
  * drm_mm_remove_node - Remove a memory node from the allocator.
@@ -421,92 +546,20 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	DRM_MM_BUG_ON(!node->allocated);
 	DRM_MM_BUG_ON(!node->allocated);
 	DRM_MM_BUG_ON(node->scanned_block);
 	DRM_MM_BUG_ON(node->scanned_block);
 
 
-	prev_node =
-	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
-
-	if (drm_mm_hole_follows(node)) {
-		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
-			      __drm_mm_hole_node_end(node));
-		list_del(&node->hole_stack);
-	} else {
-		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
-			      __drm_mm_hole_node_end(node));
-	}
+	prev_node = list_prev_entry(node, node_list);
 
 
-	if (!drm_mm_hole_follows(prev_node)) {
-		prev_node->hole_follows = 1;
-		list_add(&prev_node->hole_stack, &mm->hole_stack);
-	} else
-		list_move(&prev_node->hole_stack, &mm->hole_stack);
+	if (drm_mm_hole_follows(node))
+		rm_hole(node);
 
 
 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
 	list_del(&node->node_list);
-	node->allocated = 0;
-}
-EXPORT_SYMBOL(drm_mm_remove_node);
-
-static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
-{
-	if (end - start < size)
-		return 0;
-
-	if (alignment) {
-		u64 rem;
-
-		div64_u64_rem(start, alignment, &rem);
-		if (rem)
-			start += alignment - rem;
-	}
+	node->allocated = false;
 
 
-	return end >= start + size;
-}
-
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-							u64 size,
-							u64 alignment,
-							unsigned long color,
-							u64 start,
-							u64 end,
-							enum drm_mm_search_flags flags)
-{
-	struct drm_mm_node *entry;
-	struct drm_mm_node *best;
-	u64 adj_start;
-	u64 adj_end;
-	u64 best_size;
-
-	DRM_MM_BUG_ON(mm->scan_active);
-
-	best = NULL;
-	best_size = ~0UL;
-
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
-
-		if (mm->color_adjust) {
-			mm->color_adjust(entry, color, &adj_start, &adj_end);
-			if (adj_end <= adj_start)
-				continue;
-		}
-
-		adj_start = max(adj_start, start);
-		adj_end = min(adj_end, end);
-
-		if (!check_free_hole(adj_start, adj_end, size, alignment))
-			continue;
-
-		if (!(flags & DRM_MM_SEARCH_BEST))
-			return entry;
-
-		if (hole_size < best_size) {
-			best = entry;
-			best_size = hole_size;
-		}
-	}
-
-	return best;
+	if (drm_mm_hole_follows(prev_node))
+		rm_hole(prev_node);
+	add_hole(prev_node);
 }
 }
+EXPORT_SYMBOL(drm_mm_remove_node);
 
 
 /**
 /**
  * drm_mm_replace_node - move an allocation from @old to @new
  * drm_mm_replace_node - move an allocation from @old to @new
@@ -521,18 +574,23 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 {
 	DRM_MM_BUG_ON(!old->allocated);
 	DRM_MM_BUG_ON(!old->allocated);
 
 
+	*new = *old;
+
 	list_replace(&old->node_list, &new->node_list);
 	list_replace(&old->node_list, &new->node_list);
-	list_replace(&old->hole_stack, &new->hole_stack);
 	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
 	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
-	new->hole_follows = old->hole_follows;
-	new->mm = old->mm;
-	new->start = old->start;
-	new->size = old->size;
-	new->color = old->color;
-	new->__subtree_last = old->__subtree_last;
-
-	old->allocated = 0;
-	new->allocated = 1;
+
+	if (drm_mm_hole_follows(old)) {
+		list_replace(&old->hole_stack, &new->hole_stack);
+		rb_replace_node(&old->rb_hole_size,
+				&new->rb_hole_size,
+				&old->mm->holes_size);
+		rb_replace_node(&old->rb_hole_addr,
+				&new->rb_hole_addr,
+				&old->mm->holes_addr);
+	}
+
+	old->allocated = false;
+	new->allocated = true;
 }
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 EXPORT_SYMBOL(drm_mm_replace_node);
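
A hypothetical caller of the updated helper, to show the direction of the copy (old into new) now that the whole node is copied up front:

static struct drm_mm_node *steal_node(struct drm_mm_node *old)
{
	/* hand the allocated range over to a fresh node object */
	struct drm_mm_node *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		drm_mm_replace_node(old, new);
	return new;
}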
 
 
@@ -577,7 +635,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * @color: opaque tag value to use for the allocation
  * @color: opaque tag value to use for the allocation
  * @start: start of the allowed range for the allocation
  * @start: start of the allowed range for the allocation
  * @end: end of the allowed range for the allocation
  * @end: end of the allowed range for the allocation
- * @flags: flags to specify how the allocation will be performed afterwards
+ * @mode: fine-tune the allocation search and placement
  *
  *
  * This simply sets up the scanning routines with the parameters for the desired
  * This simply sets up the scanning routines with the parameters for the desired
  * hole.
  * hole.
@@ -593,7 +651,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 				 unsigned long color,
 				 unsigned long color,
 				 u64 start,
 				 u64 start,
 				 u64 end,
 				 u64 end,
-				 unsigned int flags)
+				 enum drm_mm_insert_mode mode)
 {
 {
 	DRM_MM_BUG_ON(start >= end);
 	DRM_MM_BUG_ON(start >= end);
 	DRM_MM_BUG_ON(!size || size > end - start);
 	DRM_MM_BUG_ON(!size || size > end - start);
@@ -608,7 +666,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 	scan->alignment = alignment;
 	scan->alignment = alignment;
 	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 	scan->size = size;
 	scan->size = size;
-	scan->flags = flags;
+	scan->mode = mode;
 
 
 	DRM_MM_BUG_ON(end <= start);
 	DRM_MM_BUG_ON(end <= start);
 	scan->range_start = start;
 	scan->range_start = start;
@@ -667,7 +725,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
 	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
 		return false;
 		return false;
 
 
-	if (scan->flags == DRM_MM_CREATE_TOP)
+	if (scan->mode == DRM_MM_INSERT_HIGH)
 		adj_start = adj_end - scan->size;
 		adj_start = adj_end - scan->size;
 
 
 	if (scan->alignment) {
 	if (scan->alignment) {
@@ -679,7 +737,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 			div64_u64_rem(adj_start, scan->alignment, &rem);
 			div64_u64_rem(adj_start, scan->alignment, &rem);
 		if (rem) {
 		if (rem) {
 			adj_start -= rem;
 			adj_start -= rem;
-			if (scan->flags != DRM_MM_CREATE_TOP)
+			if (scan->mode != DRM_MM_INSERT_HIGH)
 				adj_start += scan->alignment;
 				adj_start += scan->alignment;
 			if (adj_start < max(col_start, scan->range_start) ||
 			if (adj_start < max(col_start, scan->range_start) ||
 			    min(col_end, scan->range_end) - adj_start < scan->size)
 			    min(col_end, scan->range_end) - adj_start < scan->size)
@@ -775,7 +833,7 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
 
 
 	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
 	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
 	hole_start = __drm_mm_hole_node_start(hole);
 	hole_start = __drm_mm_hole_node_start(hole);
-	hole_end = __drm_mm_hole_node_end(hole);
+	hole_end = hole_start + hole->hole_size;
 
 
 	DRM_MM_BUG_ON(hole_start > scan->hit_start);
 	DRM_MM_BUG_ON(hole_start > scan->hit_start);
 	DRM_MM_BUG_ON(hole_end < scan->hit_end);
 	DRM_MM_BUG_ON(hole_end < scan->hit_end);
@@ -802,21 +860,22 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 {
 {
 	DRM_MM_BUG_ON(start + size <= start);
 	DRM_MM_BUG_ON(start + size <= start);
 
 
+	mm->color_adjust = NULL;
+
 	INIT_LIST_HEAD(&mm->hole_stack);
 	INIT_LIST_HEAD(&mm->hole_stack);
-	mm->scan_active = 0;
+	mm->interval_tree = RB_ROOT;
+	mm->holes_size = RB_ROOT;
+	mm->holes_addr = RB_ROOT;
 
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	mm->head_node.allocated = 0;
-	mm->head_node.hole_follows = 1;
+	mm->head_node.allocated = false;
 	mm->head_node.mm = mm;
 	mm->head_node.mm = mm;
 	mm->head_node.start = start + size;
 	mm->head_node.start = start + size;
-	mm->head_node.size = start - mm->head_node.start;
-	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+	mm->head_node.size = -size;
+	add_hole(&mm->head_node);
 
 
-	mm->interval_tree = RB_ROOT;
-
-	mm->color_adjust = NULL;
+	mm->scan_active = 0;
 }
 }
 EXPORT_SYMBOL(drm_mm_init);
 EXPORT_SYMBOL(drm_mm_init);
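
A quick sanity check of the head-node trick with made-up numbers: after drm_mm_init(mm, 0x1000, 0x10000),

	head_node.start = 0x1000 + 0x10000                  = 0x11000
	head_node.size  = (u64)-0x10000                       (wraps around)
	hole start      = head_node.start + head_node.size  = 0x1000
	hole end        = next node's start (itself)         = 0x11000
	hole_size       = 0x11000 - 0x1000                   = 0x10000

so add_hole(&mm->head_node) seeds both hole trees with a single hole covering exactly the managed range, without special-casing the empty allocator.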
 
 
@@ -837,20 +896,17 @@ EXPORT_SYMBOL(drm_mm_takedown);
 
 
 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
 {
 {
-	u64 hole_start, hole_end, hole_size;
-
-	if (entry->hole_follows) {
-		hole_start = drm_mm_hole_node_start(entry);
-		hole_end = drm_mm_hole_node_end(entry);
-		hole_size = hole_end - hole_start;
-		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
-			   hole_end, hole_size);
-		return hole_size;
+	u64 start, size;
+
+	size = entry->hole_size;
+	if (size) {
+		start = drm_mm_hole_node_start(entry);
+		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
+			   start, start + size, size);
 	}
 	}
 
 
-	return 0;
+	return size;
 }
 }
-
 /**
 /**
  * drm_mm_print - print allocator state
  * drm_mm_print - print allocator state
  * @mm: drm_mm allocator to print
  * @mm: drm_mm allocator to print

+ 2 - 6
drivers/gpu/drm/drm_modes.c

@@ -1481,12 +1481,8 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
 
 
 	mode->type |= DRM_MODE_TYPE_USERDEF;
 	mode->type |= DRM_MODE_TYPE_USERDEF;
 	/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
 	/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
-	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
-		mode->hdisplay = 1366;
-		mode->hsync_start--;
-		mode->hsync_end--;
-		drm_mode_set_name(mode);
-	}
+	if (cmd->xres == 1366)
+		drm_mode_fixup_1366x768(mode);
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	return mode;
 	return mode;
 }
 }
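
The open-coded fixup removed here moves into a shared drm_mode_fixup_1366x768() helper (also picked up by the EDID path, per the diffstat). Reconstructed from the removed lines, it presumably looks roughly like the sketch below; the exact guard lives in drm_modes.c/drm_crtc_internal.h and may differ:

void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
{
	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
		mode->hdisplay = 1366;
		mode->hsync_start--;
		mode->hsync_end--;
		drm_mode_set_name(mode);
	}
}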

+ 1 - 2
drivers/gpu/drm/drm_vma_manager.c

@@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 		goto out_unlock;
 		goto out_unlock;
 	}
 	}
 
 
-	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
-				 pages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
 	if (ret)
 	if (ret)
 		goto out_unlock;
 		goto out_unlock;
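
The call site now passes only the size; presumably drm_mm_insert_node() is reduced to a convenience wrapper along these lines (a sketch, not the exact header change):

static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node, u64 size)
{
	return drm_mm_insert_node_in_range(mm, node, size, 0, 0,
					   0, U64_MAX, DRM_MM_INSERT_BEST);
}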
 
 

+ 1 - 0
drivers/gpu/drm/etnaviv/Makefile

@@ -1,6 +1,7 @@
 etnaviv-y := \
 etnaviv-y := \
 	etnaviv_buffer.o \
 	etnaviv_buffer.o \
 	etnaviv_cmd_parser.o \
 	etnaviv_cmd_parser.o \
+	etnaviv_cmdbuf.o \
 	etnaviv_drv.o \
 	etnaviv_drv.o \
 	etnaviv_dump.o \
 	etnaviv_dump.o \
 	etnaviv_gem_prime.o \
 	etnaviv_gem_prime.o \

+ 8 - 6
drivers/gpu/drm/etnaviv/etnaviv_buffer.c

@@ -15,6 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_mmu.h"
@@ -125,7 +126,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
 	u32 *ptr = buf->vaddr + off;
 	u32 *ptr = buf->vaddr + off;
 
 
 	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
 	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
-			ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
+			ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
 
 
 	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
 	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
 			ptr, len * 4, 0);
 			ptr, len * 4, 0);
@@ -158,7 +159,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
 	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
 	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
 		buffer->user_size = 0;
 		buffer->user_size = 0;
 
 
-	return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
+	return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
 }
 }
 
 
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -169,7 +170,7 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 	buffer->user_size = 0;
 	buffer->user_size = 0;
 
 
 	CMD_WAIT(buffer);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
 		 buffer->user_size - 4);
 		 buffer->user_size - 4);
 
 
 	return buffer->user_size / 8;
 	return buffer->user_size / 8;
@@ -261,7 +262,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	if (drm_debug & DRM_UT_DRIVER)
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 
 
-	link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
+	link_target = etnaviv_cmdbuf_get_va(cmdbuf);
 	link_dwords = cmdbuf->size / 8;
 	link_dwords = cmdbuf->size / 8;
 
 
 	/*
 	/*
@@ -355,12 +356,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+	CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
 			    buffer->user_size - 4);
 			    buffer->user_size - 4);
 
 
 	if (drm_debug & DRM_UT_DRIVER)
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
-			return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
+			return_target, etnaviv_cmdbuf_get_va(cmdbuf),
+			cmdbuf->vaddr);
 
 
 	if (drm_debug & DRM_UT_DRIVER) {
 	if (drm_debug & DRM_UT_DRIVER) {
 		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
 		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,

+ 6 - 0
drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c

@@ -56,6 +56,8 @@ static const struct {
 	ST(0x0644, 1),
 	ST(0x0644, 1),
 	ST(0x064c, 1),
 	ST(0x064c, 1),
 	ST(0x0680, 8),
 	ST(0x0680, 8),
+	ST(0x086c, 1),
+	ST(0x1028, 1),
 	ST(0x1410, 1),
 	ST(0x1410, 1),
 	ST(0x1430, 1),
 	ST(0x1430, 1),
 	ST(0x1458, 1),
 	ST(0x1458, 1),
@@ -73,8 +75,12 @@ static const struct {
 	ST(0x16c0, 8),
 	ST(0x16c0, 8),
 	ST(0x16e0, 8),
 	ST(0x16e0, 8),
 	ST(0x1740, 8),
 	ST(0x1740, 8),
+	ST(0x17c0, 8),
+	ST(0x17e0, 8),
 	ST(0x2400, 14 * 16),
 	ST(0x2400, 14 * 16),
 	ST(0x10800, 32 * 16),
 	ST(0x10800, 32 * 16),
+	ST(0x14600, 16),
+	ST(0x14800, 8 * 8),
 #undef ST
 #undef ST
 };
 };
 
 

+ 153 - 0
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c

@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drm_mm.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+#define SUBALLOC_SIZE		SZ_256K
+#define SUBALLOC_GRANULE	SZ_4K
+#define SUBALLOC_GRANULES	(SUBALLOC_SIZE / SUBALLOC_GRANULE)
+
+struct etnaviv_cmdbuf_suballoc {
+	/* suballocated dma buffer properties */
+	struct etnaviv_gpu *gpu;
+	void *vaddr;
+	dma_addr_t paddr;
+
+	/* GPU mapping */
+	u32 iova;
+	struct drm_mm_node vram_node; /* only used on MMUv2 */
+
+	/* allocation management */
+	struct mutex lock;
+	DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
+	int free_space;
+	wait_queue_head_t free_event;
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
+{
+	struct etnaviv_cmdbuf_suballoc *suballoc;
+	int ret;
+
+	suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
+	if (!suballoc)
+		return ERR_PTR(-ENOMEM);
+
+	suballoc->gpu = gpu;
+	mutex_init(&suballoc->lock);
+	init_waitqueue_head(&suballoc->free_event);
+
+	suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
+				       &suballoc->paddr, GFP_KERNEL);
+	if (!suballoc->vaddr)
+		goto free_suballoc;
+
+	ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
+					    &suballoc->vram_node, SUBALLOC_SIZE,
+					    &suballoc->iova);
+	if (ret)
+		goto free_dma;
+
+	return suballoc;
+
+free_dma:
+	dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
+free_suballoc:
+	kfree(suballoc);
+
+	return NULL;
+}
+
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
+{
+	etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
+				      SUBALLOC_SIZE, suballoc->iova);
+	dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
+		    suballoc->paddr);
+	kfree(suballoc);
+}
+
+struct etnaviv_cmdbuf *
+etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
+		   size_t nr_bos)
+{
+	struct etnaviv_cmdbuf *cmdbuf;
+	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
+				 sizeof(*cmdbuf));
+	int granule_offs, order, ret;
+
+	cmdbuf = kzalloc(sz, GFP_KERNEL);
+	if (!cmdbuf)
+		return NULL;
+
+	cmdbuf->suballoc = suballoc;
+	cmdbuf->size = size;
+
+	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
+retry:
+	mutex_lock(&suballoc->lock);
+	granule_offs = bitmap_find_free_region(suballoc->granule_map,
+					SUBALLOC_GRANULES, order);
+	if (granule_offs < 0) {
+		suballoc->free_space = 0;
+		mutex_unlock(&suballoc->lock);
+		ret = wait_event_interruptible_timeout(suballoc->free_event,
+						       suballoc->free_space,
+						       msecs_to_jiffies(10 * 1000));
+		if (!ret) {
+			dev_err(suballoc->gpu->dev,
+				"Timeout waiting for cmdbuf space\n");
+			return NULL;
+		}
+		goto retry;
+	}
+	mutex_unlock(&suballoc->lock);
+	cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
+	cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
+
+	return cmdbuf;
+}
+
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
+{
+	struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
+	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
+				 SUBALLOC_GRANULE);
+
+	mutex_lock(&suballoc->lock);
+	bitmap_release_region(suballoc->granule_map,
+			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
+			      order);
+	suballoc->free_space = 1;
+	mutex_unlock(&suballoc->lock);
+	wake_up_all(&suballoc->free_event);
+	kfree(cmdbuf);
+}
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
+{
+	return buf->suballoc->iova + buf->suballoc_offset;
+}
+
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
+{
+	return buf->suballoc->paddr + buf->suballoc_offset;
+}
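
A rough caller-side sketch of the API introduced by this file (the function name is made up, and gpu/prefetch are assumed to come from the usual etnaviv init path):

static int cmdbuf_smoke_test(struct etnaviv_gpu *gpu, u16 prefetch)
{
	struct etnaviv_cmdbuf_suballoc *suballoc;
	struct etnaviv_cmdbuf *buf;

	suballoc = etnaviv_cmdbuf_suballoc_new(gpu);	/* one 256KiB DMA pool per GPU */
	if (IS_ERR_OR_NULL(suballoc))
		return -ENOMEM;

	buf = etnaviv_cmdbuf_new(suballoc, SZ_4K, 0);	/* carve out one 4KiB granule */
	if (!buf) {
		etnaviv_cmdbuf_suballoc_destroy(suballoc);
		return -ENOMEM;
	}

	/* fill buf->vaddr with commands, then point the FE at the GPU-visible address */
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(buf), prefetch);

	etnaviv_cmdbuf_free(buf);			/* granule back to the bitmap */
	etnaviv_cmdbuf_suballoc_destroy(suballoc);
	return 0;
}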

+ 58 - 0
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h

@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_CMDBUF_H__
+#define __ETNAVIV_CMDBUF_H__
+
+#include <linux/types.h>
+
+struct etnaviv_gpu;
+struct etnaviv_cmdbuf_suballoc;
+
+struct etnaviv_cmdbuf {
+	/* suballocator this cmdbuf is allocated from */
+	struct etnaviv_cmdbuf_suballoc *suballoc;
+	/* user context key, must be unique between all active users */
+	struct etnaviv_file_private *ctx;
+	/* cmdbuf properties */
+	int suballoc_offset;
+	void *vaddr;
+	u32 size;
+	u32 user_size;
+	/* fence after which this buffer is to be disposed */
+	struct dma_fence *fence;
+	/* target exec state */
+	u32 exec_state;
+	/* per GPU in-flight list */
+	struct list_head node;
+	/* BOs attached to this command buffer */
+	unsigned int nr_bos;
+	struct etnaviv_vram_mapping *bo_map[0];
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
+
+struct etnaviv_cmdbuf *
+etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
+		   size_t nr_bos);
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
+
+#endif /* __ETNAVIV_CMDBUF_H__ */

+ 3 - 2
drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -18,11 +18,11 @@
 #include <linux/of_platform.h>
 #include <linux/of_platform.h>
 #include <drm/drm_of.h>
 #include <drm/drm_of.h>
 
 
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_gem.h"
 
 
 #ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
 #ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
 static bool reglog;
 static bool reglog;
@@ -177,7 +177,8 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
 	u32 i;
 	u32 i;
 
 
 	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
 	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
-			buf->vaddr, (u64)buf->paddr, size - buf->user_size);
+			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
+			size - buf->user_size);
 
 
 	for (i = 0; i < size / 4; i++) {
 	for (i = 0; i < size / 4; i++) {
 		if (i && !(i % 4))
 		if (i && !(i % 4))

+ 3 - 3
drivers/gpu/drm/etnaviv/etnaviv_dump.c

@@ -15,6 +15,7 @@
  */
  */
 
 
 #include <linux/devcoredump.h>
 #include <linux/devcoredump.h>
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
@@ -177,12 +178,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
 	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
 			      gpu->buffer->size,
 			      gpu->buffer->size,
-			      etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
+			      etnaviv_cmdbuf_get_va(gpu->buffer));
 
 
 	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
 	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
 		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
 		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
-				      cmd->size,
-				      etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
+				      cmd->size, etnaviv_cmdbuf_get_va(cmd));
 
 
 	/* Reserve space for the bomap */
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {
 	if (n_bomap_pages) {

+ 5 - 3
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

@@ -15,6 +15,7 @@
  */
  */
 
 
 #include <linux/reservation.h>
 #include <linux/reservation.h>
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gem.h"
@@ -332,8 +333,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
 	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
 	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
 	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
 	stream = drm_malloc_ab(1, args->stream_size);
 	stream = drm_malloc_ab(1, args->stream_size);
-	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
-					args->nr_bos);
+	cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
+				    ALIGN(args->stream_size, 8) + 8,
+				    args->nr_bos);
 	if (!bos || !relocs || !stream || !cmdbuf) {
 	if (!bos || !relocs || !stream || !cmdbuf) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
 		goto err_submit_cmds;
 		goto err_submit_cmds;
@@ -422,7 +424,7 @@ err_submit_objects:
 err_submit_cmds:
 err_submit_cmds:
 	/* if we still own the cmdbuf */
 	/* if we still own the cmdbuf */
 	if (cmdbuf)
 	if (cmdbuf)
-		etnaviv_gpu_cmdbuf_free(cmdbuf);
+		etnaviv_cmdbuf_free(cmdbuf);
 	if (stream)
 	if (stream)
 		drm_free_large(stream);
 		drm_free_large(stream);
 	if (bos)
 	if (bos)

+ 54 - 41
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -18,6 +18,8 @@
 #include <linux/dma-fence.h>
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
 #include <linux/of_device.h>
+
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gem.h"
@@ -546,6 +548,37 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
 }
 }
 
 
+static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
+{
+	/*
+	 * Base value for VIVS_PM_PULSE_EATER register on models where it
+	 * cannot be read, extracted from vivante kernel driver.
+	 */
+	u32 pulse_eater = 0x01590880;
+
+	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
+		pulse_eater |= BIT(23);
+
+	}
+
+	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
+	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
+		pulse_eater &= ~BIT(16);
+		pulse_eater |= BIT(17);
+	}
+
+	if ((gpu->identity.revision > 0x5420) &&
+	    (gpu->identity.features & chipFeatures_PIPE_3D))
+	{
+		/* Performance fix: disable internal DFS */
+		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
+		pulse_eater |= BIT(18);
+	}
+
+	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
 {
 	u16 prefetch;
 	u16 prefetch;
@@ -586,6 +619,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 	}
 	}
 
 
+	/* setup the pulse eater */
+	etnaviv_gpu_setup_pulse_eater(gpu);
+
 	/* setup the MMU */
 	/* setup the MMU */
 	etnaviv_iommu_restore(gpu);
 	etnaviv_iommu_restore(gpu);
 
 
@@ -593,7 +629,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 	prefetch = etnaviv_buffer_init(gpu);
 	prefetch = etnaviv_buffer_init(gpu);
 
 
 	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
 	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
-	etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
+	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
 			     prefetch);
 			     prefetch);
 }
 }
 
 
@@ -658,8 +694,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 		goto fail;
 		goto fail;
 	}
 	}
 
 
+	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
+	if (IS_ERR(gpu->cmdbuf_suballoc)) {
+		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
+		ret = PTR_ERR(gpu->cmdbuf_suballoc);
+		goto fail;
+	}
+
 	/* Create buffer: */
 	/* Create buffer: */
-	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
+	gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
 	if (!gpu->buffer) {
 	if (!gpu->buffer) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
 		dev_err(gpu->dev, "could not create command buffer\n");
 		dev_err(gpu->dev, "could not create command buffer\n");
@@ -667,7 +710,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	}
 	}
 
 
 	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
 	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
-	    gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+	    etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
 		ret = -EINVAL;
 		ret = -EINVAL;
 		dev_err(gpu->dev,
 		dev_err(gpu->dev,
 			"command buffer outside valid memory window\n");
 			"command buffer outside valid memory window\n");
@@ -694,7 +737,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	return 0;
 	return 0;
 
 
 free_buffer:
 free_buffer:
-	etnaviv_gpu_cmdbuf_free(gpu->buffer);
+	etnaviv_cmdbuf_free(gpu->buffer);
 	gpu->buffer = NULL;
 	gpu->buffer = NULL;
 destroy_iommu:
 destroy_iommu:
 	etnaviv_iommu_destroy(gpu->mmu);
 	etnaviv_iommu_destroy(gpu->mmu);
@@ -1117,41 +1160,6 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
  * Cmdstream submission/retirement:
  * Cmdstream submission/retirement:
  */
  */
 
 
-struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
-	size_t nr_bos)
-{
-	struct etnaviv_cmdbuf *cmdbuf;
-	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
-				 sizeof(*cmdbuf));
-
-	cmdbuf = kzalloc(sz, GFP_KERNEL);
-	if (!cmdbuf)
-		return NULL;
-
-	if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
-		size = ALIGN(size, SZ_4K);
-
-	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
-				     GFP_KERNEL);
-	if (!cmdbuf->vaddr) {
-		kfree(cmdbuf);
-		return NULL;
-	}
-
-	cmdbuf->gpu = gpu;
-	cmdbuf->size = size;
-
-	return cmdbuf;
-}
-
-void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
-{
-	etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
-	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
-		    cmdbuf->paddr);
-	kfree(cmdbuf);
-}
-
 static void retire_worker(struct work_struct *work)
 static void retire_worker(struct work_struct *work)
 {
 {
 	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
 	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
@@ -1177,7 +1185,7 @@ static void retire_worker(struct work_struct *work)
 			etnaviv_gem_mapping_unreference(mapping);
 			etnaviv_gem_mapping_unreference(mapping);
 		}
 		}
 
 
-		etnaviv_gpu_cmdbuf_free(cmdbuf);
+		etnaviv_cmdbuf_free(cmdbuf);
 		/*
 		/*
 		 * We need to balance the runtime PM count caused by
 		 * We need to balance the runtime PM count caused by
 		 * each submission.  Upon submission, we increment
 		 * each submission.  Upon submission, we increment
@@ -1593,10 +1601,15 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 #endif
 #endif
 
 
 	if (gpu->buffer) {
 	if (gpu->buffer) {
-		etnaviv_gpu_cmdbuf_free(gpu->buffer);
+		etnaviv_cmdbuf_free(gpu->buffer);
 		gpu->buffer = NULL;
 		gpu->buffer = NULL;
 	}
 	}
 
 
+	if (gpu->cmdbuf_suballoc) {
+		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+		gpu->cmdbuf_suballoc = NULL;
+	}
+
 	if (gpu->mmu) {
 	if (gpu->mmu) {
 		etnaviv_iommu_destroy(gpu->mmu);
 		etnaviv_iommu_destroy(gpu->mmu);
 		gpu->mmu = NULL;
 		gpu->mmu = NULL;

+ 2 - 26
drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -92,6 +92,7 @@ struct etnaviv_event {
 	struct dma_fence *fence;
 	struct dma_fence *fence;
 };
 };
 
 
+struct etnaviv_cmdbuf_suballoc;
 struct etnaviv_cmdbuf;
 struct etnaviv_cmdbuf;
 
 
 struct etnaviv_gpu {
 struct etnaviv_gpu {
@@ -135,6 +136,7 @@ struct etnaviv_gpu {
 	int irq;
 	int irq;
 
 
 	struct etnaviv_iommu *mmu;
 	struct etnaviv_iommu *mmu;
+	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
 
 
 	/* Power Control: */
 	/* Power Control: */
 	struct clk *clk_bus;
 	struct clk *clk_bus;
@@ -150,29 +152,6 @@ struct etnaviv_gpu {
 	struct work_struct recover_work;
 	struct work_struct recover_work;
 };
 };
 
 
-struct etnaviv_cmdbuf {
-	/* device this cmdbuf is allocated for */
-	struct etnaviv_gpu *gpu;
-	/* user context key, must be unique between all active users */
-	struct etnaviv_file_private *ctx;
-	/* cmdbuf properties */
-	void *vaddr;
-	dma_addr_t paddr;
-	u32 size;
-	u32 user_size;
-	/* vram node used if the cmdbuf is mapped through the MMUv2 */
-	struct drm_mm_node vram_node;
-	/* fence after which this buffer is to be disposed */
-	struct dma_fence *fence;
-	/* target exec state */
-	u32 exec_state;
-	/* per GPU in-flight list */
-	struct list_head node;
-	/* BOs attached to this command buffer */
-	unsigned int nr_bos;
-	struct etnaviv_vram_mapping *bo_map[0];
-};
-
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
 {
 {
 	etnaviv_writel(data, gpu->mmio + reg);
 	etnaviv_writel(data, gpu->mmio + reg);
@@ -211,9 +190,6 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
 	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
 	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
-struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
-					      u32 size, size_t nr_bos);
-void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);

+ 1 - 1
drivers/gpu/drm/etnaviv/etnaviv_iommu.c

@@ -184,7 +184,7 @@ static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
 	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
 	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
 }
 }
 
 
-static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
 	.ops = {
 	.ops = {
 		.domain_free = etnaviv_domain_free,
 		.domain_free = etnaviv_domain_free,
 		.map = etnaviv_iommuv1_map,
 		.map = etnaviv_iommuv1_map,

+ 4 - 2
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c

@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
 #include <linux/bitops.h>
 
 
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_iommu.h"
 #include "etnaviv_iommu.h"
@@ -229,7 +230,7 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
 			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
 			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
 }
 }
 
 
-static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
 	.ops = {
 	.ops = {
 		.domain_free = etnaviv_iommuv2_domain_free,
 		.domain_free = etnaviv_iommuv2_domain_free,
 		.map = etnaviv_iommuv2_map,
 		.map = etnaviv_iommuv2_map,
@@ -254,7 +255,8 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)etnaviv_domain->mtlb_dma,
 				(u32)etnaviv_domain->mtlb_dma,
 				(u32)etnaviv_domain->bad_page_dma);
 				(u32)etnaviv_domain->bad_page_dma);
-	etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
+	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
+			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);
 	etnaviv_gpu_wait_idle(gpu, 100);
 
 
 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);

+ 31 - 38
drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@@ -15,6 +15,7 @@
  */
  */
 
 
 #include "common.xml.h"
 #include "common.xml.h"
+#include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_gpu.h"
@@ -107,6 +108,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 				   struct drm_mm_node *node, size_t size)
 				   struct drm_mm_node *node, size_t size)
 {
 {
 	struct etnaviv_vram_mapping *free = NULL;
 	struct etnaviv_vram_mapping *free = NULL;
+	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
 	int ret;
 	int ret;
 
 
 	lockdep_assert_held(&mmu->lock);
 	lockdep_assert_held(&mmu->lock);
@@ -117,15 +119,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		struct list_head list;
 		struct list_head list;
 		bool found;
 		bool found;
 
 
-		/*
-		 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
-		 * drm_mm into giving out a low IOVA after address space
-		 * rollover. This needs a proper fix.
-		 */
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-			size, 0, mmu->last_iova, ~0UL,
-			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
-
+						  size, 0, 0,
+						  mmu->last_iova, U64_MAX,
+						  mode);
 		if (ret != -ENOSPC)
 		if (ret != -ENOSPC)
 			break;
 			break;
 
 
@@ -140,7 +137,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		}
 		}
 
 
 		/* Try to retire some entries */
 		/* Try to retire some entries */
-		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
+		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
 
 
 		found = 0;
 		found = 0;
 		INIT_LIST_HEAD(&list);
 		INIT_LIST_HEAD(&list);
@@ -192,13 +189,12 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 			list_del_init(&m->scan_node);
 			list_del_init(&m->scan_node);
 		}
 		}
 
 
+		mode = DRM_MM_INSERT_EVICT;
+
 		/*
 		/*
 		 * We removed enough mappings so that the new allocation will
 		 * We removed enough mappings so that the new allocation will
-		 * succeed.  Ensure that the MMU will be flushed before the
-		 * associated commit requesting this mapping, and retry the
-		 * allocation one more time.
+		 * succeed, retry the allocation one more time.
 		 */
 		 */
-		mmu->need_flush = true;
 	}
 	}
 
 
 	return ret;
 	return ret;
@@ -250,6 +246,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	}
 	}
 
 
 	list_add_tail(&mapping->mmu_node, &mmu->mappings);
 	list_add_tail(&mapping->mmu_node, &mmu->mappings);
+	mmu->need_flush = true;
 	mutex_unlock(&mmu->lock);
 	mutex_unlock(&mmu->lock);
 
 
 	return ret;
 	return ret;
@@ -267,6 +264,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 		etnaviv_iommu_remove_mapping(mmu, mapping);
 		etnaviv_iommu_remove_mapping(mmu, mapping);
 
 
 	list_del(&mapping->mmu_node);
 	list_del(&mapping->mmu_node);
+	mmu->need_flush = true;
 	mutex_unlock(&mmu->lock);
 	mutex_unlock(&mmu->lock);
 }
 }
 
 
@@ -322,55 +320,50 @@ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
 		etnaviv_iommuv2_restore(gpu);
 		etnaviv_iommuv2_restore(gpu);
 }
 }
 
 
-u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
-				struct etnaviv_cmdbuf *buf)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
+				  struct drm_mm_node *vram_node, size_t size,
+				  u32 *iova)
 {
 {
 	struct etnaviv_iommu *mmu = gpu->mmu;
 	struct etnaviv_iommu *mmu = gpu->mmu;
 
 
 	if (mmu->version == ETNAVIV_IOMMU_V1) {
 	if (mmu->version == ETNAVIV_IOMMU_V1) {
-		return buf->paddr - gpu->memory_base;
+		*iova = paddr - gpu->memory_base;
+		return 0;
 	} else {
 	} else {
 		int ret;
 		int ret;
 
 
-		if (buf->vram_node.allocated)
-			return (u32)buf->vram_node.start;
-
 		mutex_lock(&mmu->lock);
 		mutex_lock(&mmu->lock);
-		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
-					      buf->size + SZ_64K);
+		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
 		if (ret < 0) {
 		if (ret < 0) {
 			mutex_unlock(&mmu->lock);
 			mutex_unlock(&mmu->lock);
-			return 0;
+			return ret;
 		}
 		}
-		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
-				buf->size, IOMMU_READ);
+		ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
+				IOMMU_READ);
 		if (ret < 0) {
 		if (ret < 0) {
-			drm_mm_remove_node(&buf->vram_node);
+			drm_mm_remove_node(vram_node);
 			mutex_unlock(&mmu->lock);
 			mutex_unlock(&mmu->lock);
-			return 0;
+			return ret;
 		}
 		}
-		/*
-		 * At least on GC3000 the FE MMU doesn't properly flush old TLB
-		 * entries. Make sure to space the command buffers out in a way
-		 * that the FE MMU prefetch won't load invalid entries.
-		 */
-		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
+		mmu->last_iova = vram_node->start + size;
 		gpu->mmu->need_flush = true;
 		gpu->mmu->need_flush = true;
 		mutex_unlock(&mmu->lock);
 		mutex_unlock(&mmu->lock);
 
 
-		return (u32)buf->vram_node.start;
+		*iova = (u32)vram_node->start;
+		return 0;
 	}
 	}
 }
 }
 
 
-void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
-				 struct etnaviv_cmdbuf *buf)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
+				   struct drm_mm_node *vram_node, size_t size,
+				   u32 iova)
 {
 {
 	struct etnaviv_iommu *mmu = gpu->mmu;
 	struct etnaviv_iommu *mmu = gpu->mmu;
 
 
-	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+	if (mmu->version == ETNAVIV_IOMMU_V2) {
 		mutex_lock(&mmu->lock);
 		mutex_lock(&mmu->lock);
-		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
-		drm_mm_remove_node(&buf->vram_node);
+		iommu_unmap(mmu->domain, iova, size);
+		drm_mm_remove_node(vram_node);
 		mutex_unlock(&mmu->lock);
 		mutex_unlock(&mmu->lock);
 	}
 	}
 }
 }

+ 6 - 4
drivers/gpu/drm/etnaviv/etnaviv_mmu.h

@@ -62,10 +62,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 	struct etnaviv_vram_mapping *mapping);
 	struct etnaviv_vram_mapping *mapping);
 void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 
 
-u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
-				struct etnaviv_cmdbuf *buf);
-void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
-				 struct etnaviv_cmdbuf *buf);
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
+				  struct drm_mm_node *vram_node, size_t size,
+				  u32 *iova);
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
+				   struct drm_mm_node *vram_node, size_t size,
+				   u32 iova);
 
 
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);

+ 71 - 21
drivers/gpu/drm/exynos/exynos5433_drm_decon.c

@@ -13,9 +13,11 @@
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/component.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_gpio.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_runtime.h>
+#include <linux/regmap.h>
 
 
 #include <video/exynos5433_decon.h>
 #include <video/exynos5433_decon.h>
 
 
@@ -25,6 +27,9 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_iommu.h"
 
 
+#define DSD_CFG_MUX 0x1004
+#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
+
 #define WINDOWS_NR	3
 #define WINDOWS_NR	3
 #define MIN_FB_WIDTH_FOR_16WORD_BURST	128
 #define MIN_FB_WIDTH_FOR_16WORD_BURST	128
 
 
@@ -57,6 +62,7 @@ struct decon_context {
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct exynos_drm_plane_config	configs[WINDOWS_NR];
 	struct exynos_drm_plane_config	configs[WINDOWS_NR];
 	void __iomem			*addr;
 	void __iomem			*addr;
+	struct regmap			*sysreg;
 	struct clk			*clks[ARRAY_SIZE(decon_clks_name)];
 	struct clk			*clks[ARRAY_SIZE(decon_clks_name)];
 	int				pipe;
 	int				pipe;
 	unsigned long			flags;
 	unsigned long			flags;
@@ -118,18 +124,29 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
 
 
 static void decon_setup_trigger(struct decon_context *ctx)
 static void decon_setup_trigger(struct decon_context *ctx)
 {
 {
-	u32 val = !(ctx->out_type & I80_HW_TRG)
-		? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
-		  TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
-		: TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
-		  TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
-	writel(val, ctx->addr + DECON_TRIGCON);
+	if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+		return;
+
+	if (!(ctx->out_type & I80_HW_TRG)) {
+		writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
+		       | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+		       ctx->addr + DECON_TRIGCON);
+		return;
+	}
+
+	writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK
+	       | TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON);
+
+	if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
+			       DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
+		DRM_ERROR("Cannot update sysreg.\n");
 }
 }
 
 
 static void decon_commit(struct exynos_drm_crtc *crtc)
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
 {
 	struct decon_context *ctx = crtc->ctx;
 	struct decon_context *ctx = crtc->ctx;
 	struct drm_display_mode *m = &crtc->base.mode;
 	struct drm_display_mode *m = &crtc->base.mode;
+	bool interlaced = false;
 	u32 val;
 	u32 val;
 
 
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
@@ -140,13 +157,16 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 		m->crtc_hsync_end = m->crtc_htotal - 92;
 		m->crtc_hsync_end = m->crtc_htotal - 92;
 		m->crtc_vsync_start = m->crtc_vdisplay + 1;
 		m->crtc_vsync_start = m->crtc_vdisplay + 1;
 		m->crtc_vsync_end = m->crtc_vsync_start + 1;
 		m->crtc_vsync_end = m->crtc_vsync_start + 1;
+		if (m->flags & DRM_MODE_FLAG_INTERLACE)
+			interlaced = true;
 	}
 	}
 
 
-	if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
-		decon_setup_trigger(ctx);
+	decon_setup_trigger(ctx);
 
 
 	/* lcd on and use command if */
 	/* lcd on and use command if */
 	val = VIDOUT_LCD_ON;
 	val = VIDOUT_LCD_ON;
+	if (interlaced)
+		val |= VIDOUT_INTERLACE_EN_F;
 	if (ctx->out_type & IFTYPE_I80) {
 	if (ctx->out_type & IFTYPE_I80) {
 		val |= VIDOUT_COMMAND_IF;
 		val |= VIDOUT_COMMAND_IF;
 	} else {
 	} else {
@@ -155,15 +175,21 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 
 
 	writel(val, ctx->addr + DECON_VIDOUTCON0);
 	writel(val, ctx->addr + DECON_VIDOUTCON0);
 
 
-	val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
-		VIDTCON2_HOZVAL(m->hdisplay - 1);
+	if (interlaced)
+		val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) |
+			VIDTCON2_HOZVAL(m->hdisplay - 1);
+	else
+		val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
+			VIDTCON2_HOZVAL(m->hdisplay - 1);
 	writel(val, ctx->addr + DECON_VIDTCON2);
 	writel(val, ctx->addr + DECON_VIDTCON2);
 
 
 	if (!(ctx->out_type & IFTYPE_I80)) {
 	if (!(ctx->out_type & IFTYPE_I80)) {
-		val = VIDTCON00_VBPD_F(
-				m->crtc_vtotal - m->crtc_vsync_end - 1) |
-			VIDTCON00_VFPD_F(
-				m->crtc_vsync_start - m->crtc_vdisplay - 1);
+		int vbp = m->crtc_vtotal - m->crtc_vsync_end;
+		int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
+
+		if (interlaced)
+			vbp = vbp / 2 - 1;
+		val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1);
 		writel(val, ctx->addr + DECON_VIDTCON00);
 		writel(val, ctx->addr + DECON_VIDTCON00);
 
 
 		val = VIDTCON01_VSPW_F(
 		val = VIDTCON01_VSPW_F(
@@ -278,12 +304,22 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 		return;
 		return;
 
 
-	val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
-	writel(val, ctx->addr + DECON_VIDOSDxA(win));
+	if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) {
+		val = COORDINATE_X(state->crtc.x) |
+			COORDINATE_Y(state->crtc.y / 2);
+		writel(val, ctx->addr + DECON_VIDOSDxA(win));
+
+		val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+			COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1);
+		writel(val, ctx->addr + DECON_VIDOSDxB(win));
+	} else {
+		val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
+		writel(val, ctx->addr + DECON_VIDOSDxA(win));
 
 
-	val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
-		COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
-	writel(val, ctx->addr + DECON_VIDOSDxB(win));
+		val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+				COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
+		writel(val, ctx->addr + DECON_VIDOSDxB(win));
+	}
 
 
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
 		VIDOSD_Wx_ALPHA_B_F(0x0);
 		VIDOSD_Wx_ALPHA_B_F(0x0);
@@ -355,8 +391,6 @@ static void decon_swreset(struct decon_context *ctx)
 		udelay(10);
 		udelay(10);
 	}
 	}
 
 
-	WARN(tries == 0, "failed to disable DECON\n");
-
 	writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
 	writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
 	for (tries = 2000; tries; --tries) {
 	for (tries = 2000; tries; --tries) {
 		if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
 		if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
@@ -557,6 +591,13 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 
 
 	if (val) {
 	if (val) {
 		writel(val, ctx->addr + DECON_VIDINTCON1);
 		writel(val, ctx->addr + DECON_VIDINTCON1);
+		if (ctx->out_type & IFTYPE_HDMI) {
+			val = readl(ctx->addr + DECON_VIDOUTCON0);
+			val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F;
+			if (val ==
+			    (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
+				return IRQ_HANDLED;
+		}
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 	}
 	}
 
 
@@ -637,6 +678,15 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 		ctx->out_type |= IFTYPE_I80;
 		ctx->out_type |= IFTYPE_I80;
 	}
 	}
 
 
+	if (ctx->out_type & I80_HW_TRG) {
+		ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+							"samsung,disp-sysreg");
+		if (IS_ERR(ctx->sysreg)) {
+			dev_err(dev, "failed to get system register\n");
+			return PTR_ERR(ctx->sysreg);
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
 		struct clk *clk;


+ 1 - 4
drivers/gpu/drm/exynos/exynos_drm_fbdev.c

@@ -208,7 +208,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
 	struct exynos_drm_fbdev *fbdev;
 	struct exynos_drm_private *private = dev->dev_private;
 	struct drm_fb_helper *helper;
-	unsigned int num_crtc;
 	int ret;

 	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
@@ -225,9 +224,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)

 	drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);

-	num_crtc = dev->mode_config.num_crtc;
-
-	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+	ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize drm fb helper.\n");
 		goto err_init;

+ 0 - 2
drivers/gpu/drm/exynos/exynos_drm_fimd.c

@@ -125,10 +125,8 @@ static struct fimd_driver_data exynos3_fimd_driver_data = {
 	.timing_base = 0x20000,
 	.lcdblk_offset = 0x210,
 	.lcdblk_bypass_shift = 1,
-	.trg_type = I80_HW_TRG,
 	.has_shadowcon = 1,
 	.has_vidoutcon = 1,
-	.has_trigger_per_te = 1,
 };

 static struct fimd_driver_data exynos4_fimd_driver_data = {

+ 1 - 1
drivers/gpu/drm/exynos/exynos_drm_g2d.c

@@ -1683,7 +1683,7 @@ struct platform_driver g2d_driver = {
 	.probe		= g2d_probe,
 	.remove		= g2d_remove,
 	.driver		= {
-		.name	= "s5p-g2d",
+		.name	= "exynos-drm-g2d",
 		.owner	= THIS_MODULE,
 		.pm	= &g2d_pm_ops,
 		.of_match_table = exynos_g2d_match,

+ 63 - 17
drivers/gpu/drm/exynos/exynos_hdmi.c

@@ -35,6 +35,7 @@
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_graph.h>
 #include <linux/hdmi.h>
 #include <linux/component.h>
 #include <linux/mfd/syscon.h>
@@ -133,6 +134,7 @@ struct hdmi_context {
 	struct regulator_bulk_data	regul_bulk[ARRAY_SIZE(supply)];
 	struct regulator		*reg_hdmi_en;
 	struct exynos_drm_clk		phy_clk;
+	struct drm_bridge		*bridge;
 };

 static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -509,9 +511,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
 	{
 		.pixel_clock = 27000000,
 		.conf = {
-			0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
-			0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
-			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02,
+			0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
+			0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
 			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
 		},
 	},
@@ -519,9 +521,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
 		.pixel_clock = 27027000,
 		.conf = {
 			0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
-			0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
-			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
-			0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+			0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
+			0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
 		},
 	},
 	{
@@ -587,6 +589,15 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
 			0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
 		},
 	},
+	{
+		.pixel_clock = 297000000,
+		.conf = {
+			0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2,
+			0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
 };

 static const char * const hdmi_clk_gates4[] = {
@@ -788,7 +799,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
 				sizeof(buf));
 	if (ret > 0) {
 		hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
-		hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret);
+		hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3);
+		hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
 	}

 	ret = hdmi_audio_infoframe_init(&frm.audio);
@@ -912,7 +924,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
 	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);

-	return 0;
+	if (hdata->bridge) {
+		encoder->bridge = hdata->bridge;
+		hdata->bridge->encoder = encoder;
+		ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+		if (ret)
+			DRM_ERROR("Failed to attach bridge\n");
+	}
+
+	return ret;
 }

 static bool hdmi_mode_fixup(struct drm_encoder *encoder,
@@ -1581,6 +1601,31 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
 		hdmiphy_disable(hdata);
 }

+static int hdmi_bridge_init(struct hdmi_context *hdata)
+{
+	struct device *dev = hdata->dev;
+	struct device_node *ep, *np;
+
+	ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+	if (!ep)
+		return 0;
+
+	np = of_graph_get_remote_port_parent(ep);
+	of_node_put(ep);
+	if (!np) {
+		DRM_ERROR("failed to get remote port parent");
+		return -EINVAL;
+	}
+
+	hdata->bridge = of_drm_find_bridge(np);
+	of_node_put(np);
+
+	if (!hdata->bridge)
+		return -EPROBE_DEFER;
+
+	return 0;
+}
+
 static int hdmi_resources_init(struct hdmi_context *hdata)
 {
 	struct device *dev = hdata->dev;
@@ -1620,17 +1665,18 @@ static int hdmi_resources_init(struct hdmi_context *hdata)

 	hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");

-	if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV)
-		return 0;
+	if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+		if (IS_ERR(hdata->reg_hdmi_en))
+			return PTR_ERR(hdata->reg_hdmi_en);
-	if (IS_ERR(hdata->reg_hdmi_en))
-		return PTR_ERR(hdata->reg_hdmi_en);
-
-	ret = regulator_enable(hdata->reg_hdmi_en);
-	if (ret)
-		DRM_ERROR("failed to enable hdmi-en regulator\n");
+		ret = regulator_enable(hdata->reg_hdmi_en);
+		if (ret) {
+			DRM_ERROR("failed to enable hdmi-en regulator\n");
+			return ret;
+		}
+	}
-	return ret;
+	return hdmi_bridge_init(hdata);
 }

 static struct of_device_id hdmi_match_types[] = {

+ 1 - 1
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c

@@ -94,7 +94,7 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
 			"Invalid legacyfb_depth.  Defaulting to 24bpp\n");
 		legacyfb_depth = 24;
 	}
-	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1);
+	fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
 	if (IS_ERR(fsl_dev->fbdev)) {
 		ret = PTR_ERR(fsl_dev->fbdev);
 		fsl_dev->fbdev = NULL;

+ 7 - 5
drivers/gpu/drm/fsl-dcu/fsl_tcon.c

@@ -72,10 +72,8 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
 		return NULL;

 	tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
-	if (!tcon) {
-		ret = -ENOMEM;
+	if (!tcon)
 		goto err_node_put;
 		goto err_node_put;

 	ret = fsl_tcon_init_regmap(dev, tcon, np);
 	if (ret) {
 	if (ret) {
@@ -89,9 +87,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
 		goto err_node_put;
 	}

-	of_node_put(np);
-	clk_prepare_enable(tcon->ipg_clk);
+	ret = clk_prepare_enable(tcon->ipg_clk);
+	if (ret) {
+		dev_err(dev, "Couldn't enable the TCON clock\n");
+		goto err_node_put;
+	}
+	of_node_put(np);
 	dev_info(dev, "Using TCON in bypass mode\n");

 	return tcon;

+ 1 - 1
drivers/gpu/drm/gma500/framebuffer.c

@@ -564,7 +564,7 @@ int psb_fbdev_init(struct drm_device *dev)
 	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);

 	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
-				 dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
+				 INTELFB_CONN_LIMIT);
 	if (ret)
 		goto free;


+ 1 - 2
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c

@@ -200,8 +200,7 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv)
 			      &hibmc_fbdev_helper_funcs);

 	/* Now just one crtc and one channel */
-	ret = drm_fb_helper_init(priv->dev,
-				 &hifbdev->helper, 1, 1);
+	ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
 	if (ret) {
 		DRM_ERROR("failed to initialize fb helper: %d\n", ret);
 		return ret;

+ 1 - 2
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c

@@ -59,8 +59,7 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
 		drm_fbdev_cma_hotplug_event(priv->fbdev);
 	} else {
 		priv->fbdev = drm_fbdev_cma_init(dev, 32,
-				dev->mode_config.num_crtc,
-				dev->mode_config.num_connector);
+						 dev->mode_config.num_connector);
 		if (IS_ERR(priv->fbdev))
 			priv->fbdev = NULL;
 	}

+ 4 - 6
drivers/gpu/drm/i915/i915_gem.c

@@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
 	memset(node, 0, sizeof(*node));
-	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
-						   size, 0,
-						   I915_COLOR_UNEVICTABLE,
-						   0, ggtt->mappable_end,
-						   DRM_MM_SEARCH_DEFAULT,
-						   DRM_MM_CREATE_DEFAULT);
+	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+					   size, 0, I915_COLOR_UNEVICTABLE,
+					   0, ggtt->mappable_end,
+					   DRM_MM_INSERT_LOW);
 }

 static void

+ 7 - 2
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	}, **phase;
 	struct i915_vma *vma, *next;
 	struct drm_mm_node *node;
+	enum drm_mm_insert_mode mode;
 	int ret;

 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
+	mode = DRM_MM_INSERT_BEST;
+	if (flags & PIN_HIGH)
+		mode = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mode = DRM_MM_INSERT_LOW;
 	drm_mm_scan_init_with_range(&scan, &vm->mm,
 				    min_size, alignment, cache_level,
-				    start, end,
-				    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
+				    start, end, mode);

 	/* Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have

+ 2 - 3
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -437,12 +437,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
-			ret = drm_mm_insert_node_in_range_generic
+			ret = drm_mm_insert_node_in_range
 				(&ggtt->base.mm, &cache->node,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
-				 DRM_MM_SEARCH_DEFAULT,
-				 DRM_MM_CREATE_DEFAULT);
+				 DRM_MM_INSERT_LOW);
 			if (ret) /* no inactive aperture space, use cpu reloc */
 				return NULL;
 		} else {

+ 16 - 23
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -2754,12 +2754,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 		return ret;

 	/* Reserve a mappable slot for our lockless error capture */
-	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
-						  &ggtt->error_capture,
-						  PAGE_SIZE, 0,
-						  I915_COLOR_UNEVICTABLE,
-						  0, ggtt->mappable_end,
-						  0, 0);
+	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+					  0, ggtt->mappable_end,
+					  DRM_MM_INSERT_LOW);
 	if (ret)
 		return ret;

@@ -3669,7 +3667,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 			u64 size, u64 alignment, unsigned long color,
 			u64 start, u64 end, unsigned int flags)
 {
-	u32 search_flag, alloc_flag;
+	enum drm_mm_insert_mode mode;
 	u64 offset;
 	int err;

@@ -3690,13 +3688,11 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
 		return -ENOSPC;

-	if (flags & PIN_HIGH) {
-		search_flag = DRM_MM_SEARCH_BELOW;
-		alloc_flag = DRM_MM_CREATE_TOP;
-	} else {
-		search_flag = DRM_MM_SEARCH_DEFAULT;
-		alloc_flag = DRM_MM_CREATE_DEFAULT;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (flags & PIN_HIGH)
+		mode = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mode = DRM_MM_INSERT_LOW;

 	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
 	 * so we know that we always have a minimum alignment of 4096.
@@ -3708,10 +3704,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (alignment <= I915_GTT_MIN_ALIGNMENT)
 		alignment = 0;

-	err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
-						  size, alignment, color,
-						  start, end,
-						  search_flag, alloc_flag);
+	err = drm_mm_insert_node_in_range(&vm->mm, node,
+					  size, alignment, color,
+					  start, end, mode);
 	if (err != -ENOSPC)
 		return err;

@@ -3749,9 +3744,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (err)
 		return err;

-	search_flag = DRM_MM_SEARCH_DEFAULT;
-	return drm_mm_insert_node_in_range_generic(&vm->mm, node,
-						   size, alignment, color,
-						   start, end,
-						   search_flag, alloc_flag);
+	return drm_mm_insert_node_in_range(&vm->mm, node,
+					   size, alignment, color,
+					   start, end, DRM_MM_INSERT_EVICT);
 }

+ 3 - 3
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 		return -ENODEV;

 	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
-					  alignment, start, end,
-					  DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+					  size, alignment, 0,
+					  start, end, DRM_MM_INSERT_BEST);
 	mutex_unlock(&dev_priv->mm.stolen_lock);

 	return ret;

+ 1 - 2
drivers/gpu/drm/i915/intel_fbdev.c

@@ -713,8 +713,7 @@ int intel_fbdev_init(struct drm_device *dev)
 	if (!intel_fbdev_init_bios(dev, ifbdev))
 		ifbdev->preferred_bpp = 32;

-	ret = drm_fb_helper_init(dev, &ifbdev->helper,
-				 INTEL_INFO(dev_priv)->num_pipes, 4);
+	ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
 	if (ret) {
 		kfree(ifbdev);
 		return ret;

+ 1 - 2
drivers/gpu/drm/imx/imx-drm-core.c

@@ -389,8 +389,7 @@ static int imx_drm_bind(struct device *dev)
 		dev_warn(dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
 		legacyfb_depth = 16;
 	}
-	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
-				drm->mode_config.num_crtc, MAX_CRTC);
+	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC);
 	if (IS_ERR(imxdrm->fbhelper)) {
 		ret = PTR_ERR(imxdrm->fbhelper);
 		imxdrm->fbhelper = NULL;

+ 3 - 3
drivers/gpu/drm/meson/Makefile

@@ -1,4 +1,4 @@
-meson-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
-obj-$(CONFIG_DRM_MESON) += meson.o
+obj-$(CONFIG_DRM_MESON) += meson-drm.o

+ 1 - 3
drivers/gpu/drm/meson/meson_drv.c

@@ -279,7 +279,6 @@ static int meson_drv_probe(struct platform_device *pdev)
 	drm->mode_config.funcs = &meson_mode_config_funcs;

 	priv->fbdev = drm_fbdev_cma_init(drm, 32,
-					 drm->mode_config.num_crtc,
 					 drm->mode_config.num_connector);
 	if (IS_ERR(priv->fbdev)) {
 		ret = PTR_ERR(priv->fbdev);
@@ -329,8 +328,7 @@ static struct platform_driver meson_drm_platform_driver = {
 	.probe      = meson_drv_probe,
 	.remove     = meson_drv_remove,
 	.driver     = {
-		.owner  = THIS_MODULE,
-		.name   = DRIVER_NAME,
+		.name	= "meson-drm",
 		.of_match_table = dt_match,
 	},
 };

+ 1 - 1
drivers/gpu/drm/mgag200/mgag200_fb.c

@@ -286,7 +286,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 	drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);

 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
-				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+				 MGAG200FB_CONN_LIMIT);
 	if (ret)
 		goto err_fb_helper;


+ 7 - 0
drivers/gpu/drm/msm/Kconfig

@@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY
 	help
 	  Choose this option if the 28nm DSI PHY 8960 variant is used on the
 	  platform.
+
+config DRM_MSM_DSI_14NM_PHY
+	bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if DSI PHY on 8996 is used on the platform.

+ 2 - 0
drivers/gpu/drm/msm/Makefile

@@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
 msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o

 ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
 msm-y += dsi/pll/dsi_pll.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
 endif

 obj-$(CONFIG_DRM_MSM)	+= msm.o

+ 19 - 2
drivers/gpu/drm/msm/adreno/a5xx_gpu.c

@@ -12,6 +12,7 @@
  */

 #include "msm_gem.h"
+#include "msm_mmu.h"
 #include "a5xx_gpu.h"

 extern bool hang_debug;
@@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	/* Enable RBBM error reporting bits */
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

-	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
 		/*
 		 * Mask out the activity signals from RB1-3 to avoid false
 		 * positives
@@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)

 	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

-	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

 	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
@@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu)
 	return true;
 }

+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+	struct msm_gpu *gpu = arg;
+	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+			iova, flags,
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
+
+	return -EFAULT;
+}
+
 static void a5xx_cp_err_irq(struct msm_gpu *gpu)
 {
 	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
@@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 		return ERR_PTR(ret);
 	}

+	if (gpu->aspace)
+		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+
 	return gpu;
 }

+ 46 - 16
drivers/gpu/drm/msm/adreno/adreno_device.c

@@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = {
 		.gmem  = (SZ_1M + SZ_512K),
 		.init  = a4xx_gpu_init,
 	}, {
-		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
+		.rev = ADRENO_REV(5, 3, 0, 2),
 		.revn = 530,
 		.name = "A530",
 		.pm4fw = "a530_pm4.fw",
 		.pfpfw = "a530_pfp.fw",
 		.gmem = SZ_1M,
+		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+			ADRENO_QUIRK_FAULT_DETECT_MASK,
 		.init = a5xx_gpu_init,
 		.gpmufw = "a530v3_gpmu.fw2",
 	},
@@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev,
 	priv->gpu_pdev = pdev;
 }

-static const struct {
-	const char *str;
-	uint32_t flag;
-} quirks[] = {
-	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
-	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
-};
+static int find_chipid(struct device *dev, u32 *chipid)
+{
+	struct device_node *node = dev->of_node;
+	const char *compat;
+	int ret;
+
+	/* first search the compat strings for qcom,adreno-XYZ.W: */
+	ret = of_property_read_string_index(node, "compatible", 0, &compat);
+	if (ret == 0) {
+		unsigned rev, patch;
+
+		if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
+			*chipid = 0;
+			*chipid |= (rev / 100) << 24;  /* core */
+			rev %= 100;
+			*chipid |= (rev / 10) << 16;   /* major */
+			rev %= 10;
+			*chipid |= rev << 8;           /* minor */
+			*chipid |= patch;
+
+			return 0;
+		}
+	}
+
+	/* and if that fails, fall back to legacy "qcom,chipid" property: */
+	ret = of_property_read_u32(node, "qcom,chipid", chipid);
+	if (ret)
+		return ret;
+
+	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+			(*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
+			(*chipid >> 8) & 0xff, *chipid & 0xff);
+
+	return 0;
+}
 
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
 	struct device_node *child, *node = dev->of_node;
 	u32 val;
-	int ret, i;
+	int ret;
-	ret = of_property_read_u32(node, "qcom,chipid", &val);
+	ret = find_chipid(dev, &val);
 	if (ret) {
 		dev_err(dev, "could not find chipid: %d\n", ret);
 		return ret;
@@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	}

 	if (!config.fast_rate) {
-		dev_err(dev, "could not find clk rates\n");
-		return -ENXIO;
+		dev_warn(dev, "could not find clk rates\n");
+		/* This is a safe low speed for all devices: */
+		config.fast_rate = 200000000;
+		config.slow_rate = 27000000;
 	}

-	for (i = 0; i < ARRAY_SIZE(quirks); i++)
-		if (of_property_read_bool(node, quirks[i].str))
-			config.quirks |= quirks[i].flag;
-
 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
 	return 0;
@@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev)
 }

 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
 	/* for backwards compat w/ downstream kgsl DT files: */
 	{ .compatible = "qcom,kgsl-3d0" },

+ 0 - 1
drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->gmem = adreno_gpu->info->gmem;
 	adreno_gpu->revn = adreno_gpu->info->revn;
 	adreno_gpu->rev = config->rev;
-	adreno_gpu->quirks = config->quirks;

 	gpu->fast_rate = config->fast_rate;
 	gpu->slow_rate = config->slow_rate;

+ 1 - 3
drivers/gpu/drm/msm/adreno/adreno_gpu.h

@@ -75,6 +75,7 @@ struct adreno_info {
 	const char *pm4fw, *pfpfw;
 	const char *gpmufw;
 	uint32_t gmem;
+	enum adreno_quirks quirks;
 	struct msm_gpu *(*init)(struct drm_device *dev);
 };

@@ -116,8 +117,6 @@ struct adreno_gpu {
 	 * code (a3xx_gpu.c) and stored in this common location.
 	 */
 	const unsigned int *reg_offsets;
-
-	uint32_t quirks;
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

@@ -128,7 +127,6 @@ struct adreno_platform_config {
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
-	uint32_t quirks;
 };

 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

+ 6 - 12
drivers/gpu/drm/msm/dsi/dsi.c

@@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
 		return NULL;

-	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
-		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
-		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+	return msm_dsi->encoder;
 }

 static int dsi_get_phy(struct msm_dsi *msm_dsi)
@@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void)
 }

 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
-		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+			 struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_bridge *ext_bridge;
-	int ret, i;
+	int ret;
-	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
-		!encoders[MSM_DSI_CMD_ENCODER_ID]))
+	if (WARN_ON(!encoder))
 		return -EINVAL;

 	msm_dsi->dev = dev;
@@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}

+	msm_dsi->encoder = encoder;
+
 	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
 	if (IS_ERR(msm_dsi->bridge)) {
 		ret = PTR_ERR(msm_dsi->bridge);
@@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}

-	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-		encoders[i]->bridge = msm_dsi->bridge;
-		msm_dsi->encoders[i] = encoders[i];
-	}
-
 	/*
 	 * check if the dsi encoder output is connected to a panel or an
 	 * external bridge. We create a connector only if we're connected to a

+ 41 - 10
drivers/gpu/drm/msm/dsi/dsi.h

@@ -27,14 +27,24 @@
 #define DSI_1	1
 #define DSI_MAX	2

+struct msm_dsi_phy_shared_timings;
+struct msm_dsi_phy_clk_request;
+
 enum msm_dsi_phy_type {
 	MSM_DSI_PHY_28NM_HPM,
 	MSM_DSI_PHY_28NM_LP,
 	MSM_DSI_PHY_20NM,
 	MSM_DSI_PHY_28NM_8960,
+	MSM_DSI_PHY_14NM,
 	MSM_DSI_PHY_MAX
 };

+enum msm_dsi_phy_usecase {
+	MSM_DSI_PHY_STANDALONE,
+	MSM_DSI_PHY_MASTER,
+	MSM_DSI_PHY_SLAVE,
+};
+
 #define DSI_DEV_REGULATOR_MAX	8
 #define DSI_BUS_CLK_MAX		4

@@ -73,8 +83,8 @@ struct msm_dsi {
 	struct device *phy_dev;
 	bool phy_enabled;

-	/* the encoders we are hooked to (outside of dsi block) */
-	struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+	/* the encoder we are hooked to (outside of dsi block) */
+	struct drm_encoder *encoder;

 	int id;
 };
@@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
 struct drm_connector *msm_dsi_manager_connector_init(u8 id);
 struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
-int msm_dsi_manager_phy_enable(int id,
-		const unsigned long bit_rate, const unsigned long esc_rate,
-		u32 *clk_pre, u32 *clk_post);
-void msm_dsi_manager_phy_disable(int id);
 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
+void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);

@@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
 	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
 void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
 int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+			    enum msm_dsi_phy_usecase uc);
 #else
 static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			 enum msm_dsi_phy_type type, int id) {
@@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
 {
 	return 0;
 }
+static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+					  enum msm_dsi_phy_usecase uc)
+{
+	return -ENODEV;
+}
 #endif

 /* dsi host */
@@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
 					u32 dma_base, u32 len);
 int msm_dsi_host_enable(struct mipi_dsi_host *host);
 int msm_dsi_host_disable(struct mipi_dsi_host *host);
-int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+			struct msm_dsi_phy_shared_timings *phy_shared_timings);
 int msm_dsi_host_power_off(struct mipi_dsi_host *host);
 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 					struct drm_display_mode *mode);
@@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
 			struct msm_dsi_pll *src_pll);
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+	struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_host_destroy(struct mipi_dsi_host *host);
 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 					struct drm_device *dev);
@@ -164,14 +182,27 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);

 /* dsi phy */
 struct msm_dsi_phy;
+struct msm_dsi_phy_shared_timings {
+	u32 clk_post;
+	u32 clk_pre;
+	bool clk_pre_inc_by_2;
+};
+
+struct msm_dsi_phy_clk_request {
+	unsigned long bitclk_rate;
+	unsigned long escclk_rate;
+};
+
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_unregister(void);
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
 void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
-					u32 *clk_pre, u32 *clk_post);
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+			struct msm_dsi_phy_shared_timings *shared_timing);
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
+			     enum msm_dsi_phy_usecase uc);
 

 #endif /* __DSI_CONNECTOR_H__ */

+ 256 - 13
drivers/gpu/drm/msm/dsi/dsi.xml.h

@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git

 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36965 bytes, from 2016-11-26 23:01:08)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml    (  33004 bytes, from 2017-01-11 05:19:19)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-05-09 06:32:54)
+
+Copyright (C) 2013-2017 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)

@@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)

 #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018

+#define REG_DSI_14nm_PHY_CMN_REVISION_ID0			0x00000000
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID1			0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID2			0x00000008
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID3			0x0000000c
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG0				0x00000010
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK		0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
+}
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK		0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
+}
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG1				0x00000014
+#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL			0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL			0x00000018
+#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_0				0x0000001c
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_1				0x00000020
+
+#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER				0x00000024
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG0				0x00000028
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG1				0x0000002c
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG2				0x00000030
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG0				0x00000034
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG1				0x00000038
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG2				0x0000003c
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG3				0x00000040
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG4				0x00000044
+
+#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL				0x00000048
+#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START			0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL				0x0000004c
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK		0x0000003f
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK			0x000000c0
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT			6
+static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN			0x00000001
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK		0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
+
+#define REG_DSI_14nm_PHY_PLL_IE_TRIM				0x00000000
+
+#define REG_DSI_14nm_PHY_PLL_IP_TRIM				0x00000004
+
+#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM				0x00000010
+
+#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN			0x0000001c
+
+#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET			0x00000028
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL			0x0000002c
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2			0x00000030
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3			0x00000034
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4			0x00000038
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5			0x0000003c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1			0x00000040
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2			0x00000044
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1			0x00000048
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2			0x0000004c
+
+#define REG_DSI_14nm_PHY_PLL_VREF_CFG1				0x0000005c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_CODE				0x00000058
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1			0x0000006c
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2			0x00000070
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1				0x00000074
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2				0x00000078
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1			0x0000007c
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2			0x00000080
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3			0x00000084
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN			0x00000088
+
+#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE			0x0000008c
+
+#define REG_DSI_14nm_PHY_PLL_DEC_START				0x00000090
+
+#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER			0x00000094
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1			0x00000098
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2			0x0000009c
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER1				0x000000a0
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER2				0x000000a4
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1			0x000000a8
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2			0x000000ac
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1			0x000000b4
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2			0x000000b8
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3			0x000000bc
+
+#define REG_DSI_14nm_PHY_PLL_TXCLK_EN				0x000000c0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL				0x000000c4
+
+#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS		0x000000cc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_MISC1				0x000000e8
+
+#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR				0x000000f0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET			0x000000f4
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET			0x000000f8
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET			0x000000fc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF1				0x00000100
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV			0x00000104
+
+#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP			0x00000108
+

 #endif /* DSI_XML */

+ 25 - 0
drivers/gpu/drm/msm/dsi/dsi_cfg.c

@@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
 	.num_dsi = 2,
 };

+/*
+ * TODO: core_mmss_clk fails to enable for some reason, but things work fine
+ * without it too. Figure out why it doesn't enable and uncomment below
+ */
+static const char * const dsi_8996_bus_clk_names[] = {
+	"mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
+};
+
+static const struct msm_dsi_config msm8996_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vdda", 18160, 1 },	/* 1.25 V */
+			{"vcca", 17000, 32 },	/* 0.925 V */
+			{"vddio", 100000, 100 },/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_8996_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
+	.io_start = { 0x994000, 0x996000 },
+	.num_dsi = 2,
+};
+
 static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
 	{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
 						&msm8974_apq8084_dsi_cfg},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
 };

 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)

+ 1 - 0
drivers/gpu/drm/msm/dsi/dsi_cfg.h

@@ -24,6 +24,7 @@
 #define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
 #define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
 #define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+#define MSM_DSI_6G_VER_MINOR_V1_4_1	0x10040001

 #define MSM_DSI_V2_VER_MINOR_8064	0x0


Some files were not shown because too many files changed in this diff