
drm/amd/dc: Add dc display driver (v2)

Supported DCE versions: 8.0, 10.0, 11.0, 11.2

v2: rebase against 4.11

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Harry Wentland 8 years ago
commit 4562236b3b
100 files changed, 48033 additions, 37 deletions

Changed files (additions / deletions):
  1. 1 0
      drivers/gpu/drm/amd/amdgpu/Kconfig
  2. 16 1
      drivers/gpu/drm/amd/amdgpu/Makefile
  3. 15 0
      drivers/gpu/drm/amd/amdgpu/amdgpu.h
  4. 78 15
      drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
  5. 1 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
  6. 4 0
      drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
  7. 0 5
      drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
  8. 19 10
      drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
  9. 1 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
  10. 82 3
      drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
  11. 1 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
  12. 9 0
      drivers/gpu/drm/amd/amdgpu/cik.c
  13. 21 0
      drivers/gpu/drm/amd/amdgpu/vi.c
  14. 28 0
      drivers/gpu/drm/amd/display/Kconfig
  15. 22 0
      drivers/gpu/drm/amd/display/Makefile
  16. 17 0
      drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
  17. 1564 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
  18. 171 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
  19. 484 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
  20. 829 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
  21. 122 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
  22. 443 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
  23. 36 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
  24. 463 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
  25. 3150 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
  26. 101 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h
  27. 28 0
      drivers/gpu/drm/amd/display/dc/Makefile
  28. 11 0
      drivers/gpu/drm/amd/display/dc/basics/Makefile
  29. 223 0
      drivers/gpu/drm/amd/display/dc/basics/conversion.c
  30. 51 0
      drivers/gpu/drm/amd/display/dc/basics/conversion.h
  31. 691 0
      drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
  32. 221 0
      drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
  33. 134 0
      drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
  34. 100 0
      drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
  35. 457 0
      drivers/gpu/drm/amd/display/dc/basics/logger.c
  36. 67 0
      drivers/gpu/drm/amd/display/dc/basics/logger.h
  37. 197 0
      drivers/gpu/drm/amd/display/dc/basics/register_logger.c
  38. 116 0
      drivers/gpu/drm/amd/display/dc/basics/signal_types.c
  39. 307 0
      drivers/gpu/drm/amd/display/dc/basics/vector.c
  40. 24 0
      drivers/gpu/drm/amd/display/dc/bios/Makefile
  41. 4220 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
  42. 33 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
  43. 82 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
  44. 40 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
  45. 50 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
  46. 72 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
  47. 2609 0
      drivers/gpu/drm/amd/display/dc/bios/command_table.c
  48. 112 0
      drivers/gpu/drm/amd/display/dc/bios/command_table.h
  49. 288 0
      drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
  50. 90 0
      drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
  51. 364 0
      drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
  52. 34 0
      drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
  53. 418 0
      drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
  54. 34 0
      drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
  55. 354 0
      drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
  56. 33 0
      drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
  57. 10 0
      drivers/gpu/drm/amd/display/dc/calcs/Makefile
  58. 3108 0
      drivers/gpu/drm/amd/display/dc/calcs/bandwidth_calcs.c
  59. 299 0
      drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
  60. 1382 0
      drivers/gpu/drm/amd/display/dc/calcs/gamma_calcs.c
  61. 1846 0
      drivers/gpu/drm/amd/display/dc/core/dc.c
  62. 270 0
      drivers/gpu/drm/amd/display/dc/core/dc_debug.c
  63. 93 0
      drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
  64. 1899 0
      drivers/gpu/drm/amd/display/dc/core/dc_link.c
  65. 1098 0
      drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
  66. 2462 0
      drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
  67. 222 0
      drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
  68. 1934 0
      drivers/gpu/drm/amd/display/dc/core/dc_resource.c
  69. 113 0
      drivers/gpu/drm/amd/display/dc/core/dc_sink.c
  70. 141 0
      drivers/gpu/drm/amd/display/dc/core/dc_stream.c
  71. 213 0
      drivers/gpu/drm/amd/display/dc/core/dc_surface.c
  72. 334 0
      drivers/gpu/drm/amd/display/dc/core/dc_target.c
  73. 780 0
      drivers/gpu/drm/amd/display/dc/dc.h
  74. 224 0
      drivers/gpu/drm/amd/display/dc/dc_bios_types.h
  75. 115 0
      drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
  76. 105 0
      drivers/gpu/drm/amd/display/dc/dc_dp_types.h
  77. 144 0
      drivers/gpu/drm/amd/display/dc/dc_helper.c
  78. 588 0
      drivers/gpu/drm/amd/display/dc/dc_hw_types.h
  79. 493 0
      drivers/gpu/drm/amd/display/dc/dc_types.h
  80. 14 0
      drivers/gpu/drm/amd/display/dc/dce/Makefile
  81. 920 0
      drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
  82. 145 0
      drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
  83. 1264 0
      drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
  84. 109 0
      drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
  85. 195 0
      drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
  86. 250 0
      drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
  87. 2176 0
      drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
  88. 363 0
      drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
  89. 384 0
      drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
  90. 217 0
      drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
  91. 501 0
      drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
  92. 1302 0
      drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
  93. 564 0
      drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
  94. 1002 0
      drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
  95. 313 0
      drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
  96. 23 0
      drivers/gpu/drm/amd/display/dc/dce100/Makefile
  97. 140 0
      drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
  98. 36 0
      drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
  99. 1085 0
      drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
  100. 19 0
      drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/Kconfig

@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
 	  pages. Uses more memory for housekeeping, enable only for debugging.
 
 source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"

+ 16 - 1
drivers/gpu/drm/amd/amdgpu/Makefile

@@ -3,13 +3,19 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
 
 ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
 	-I$(FULL_AMD_PATH)/include \
 	-I$(FULL_AMD_PATH)/amdgpu \
 	-I$(FULL_AMD_PATH)/scheduler \
 	-I$(FULL_AMD_PATH)/powerplay/inc \
-	-I$(FULL_AMD_PATH)/acp/include
+	-I$(FULL_AMD_PATH)/acp/include \
+	-I$(FULL_AMD_DISPLAY_PATH) \
+	-I$(FULL_AMD_DISPLAY_PATH)/include \
+	-I$(FULL_AMD_DISPLAY_PATH)/dc \
+	-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
 
 amdgpu-y := amdgpu_drv.o
 
@@ -132,4 +138,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
 
 amdgpu-y += $(AMD_POWERPLAY_FILES)
 
+ifneq ($(CONFIG_DRM_AMD_DC),)
+
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
+
+endif
+
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o

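When CONFIG_DRM_AMD_DC is set, the include of display/Makefile (shown further down) accumulates all DC object paths into AMD_DISPLAY_FILES, which the guard above then appends to amdgpu-y; with the option unset, none of the display/ code is compiled into the module at all.
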
+ 15 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -66,6 +66,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_dm.h"
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
@@ -101,6 +102,7 @@ extern int amdgpu_vm_fragment_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
+extern int amdgpu_dc;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_no_evict;
@@ -1507,6 +1509,7 @@ struct amdgpu_device {
 	/* display */
 	bool				enable_virtual_display;
 	struct amdgpu_mode_info		mode_info;
+	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
 	struct amdgpu_irq_src		pageflip_irq;
@@ -1563,6 +1566,9 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;
 
+	/* display related functionality */
+	struct amdgpu_display_manager dm;
+
 	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
 	int				num_ip_blocks;
 	struct mutex	mn_lock;
@@ -1624,6 +1630,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
 /*
  * Registers read & write functions.
  */
@@ -1884,5 +1893,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 			   uint64_t addr, struct amdgpu_bo **bo,
 			   struct amdgpu_bo_va_mapping **mapping);
 
+#if defined(CONFIG_DRM_AMD_DC)
+int amdgpu_dm_display_resume(struct amdgpu_device *adev);
+#else
+static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+#endif
+
 #include "amdgpu_object.h"
 #endif

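Note the stub at the end of the header: with CONFIG_DRM_AMD_DC disabled, amdgpu_dm_display_resume() collapses to a static inline that returns 0, so the callers added below in amdgpu_device.c can invoke it unconditionally instead of repeating the #ifdef at every call site.
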
+ 78 - 15
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -31,6 +31,7 @@
 #include <linux/debugfs.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
@@ -1973,6 +1974,41 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 	}
 }
 
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+{
+	switch (asic_type) {
+#if defined(CONFIG_DRM_AMD_DC)
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+		return amdgpu_dc != 0;
+#else
+		return amdgpu_dc > 0;
+#endif
+#endif
+	default:
+		return false;
+	}
+}
+
+/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+	return amdgpu_device_asic_has_dc_support(adev->asic_type);
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -2168,7 +2204,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			goto failed;
 		}
 		/* init i2c buses */
-		amdgpu_atombios_i2c_init(adev);
+		if (!amdgpu_device_has_dc_support(adev))
+			amdgpu_atombios_i2c_init(adev);
 	}
 
 	/* Fence driver */
@@ -2296,7 +2333,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	adev->accel_working = false;
 	cancel_delayed_work_sync(&adev->late_init_work);
 	/* free i2c buses */
-	amdgpu_i2c_fini(adev);
+	if (!amdgpu_device_has_dc_support(adev))
+		amdgpu_i2c_fini(adev);
 	amdgpu_atombios_fini(adev);
 	kfree(adev->bios);
 	adev->bios = NULL;
@@ -2346,12 +2384,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	drm_kms_helper_poll_disable(dev);
 
-	/* turn off display hw */
-	drm_modeset_lock_all(dev);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	if (!amdgpu_device_has_dc_support(adev)) {
+		/* turn off display hw */
+		drm_modeset_lock_all(dev);
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		}
+		drm_modeset_unlock_all(dev);
 	}
-	drm_modeset_unlock_all(dev);
 
 	amdgpu_amdkfd_suspend(adev);
 
@@ -2494,13 +2534,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 
 	/* blat the mode back in */
 	if (fbcon) {
-		drm_helper_resume_force_mode(dev);
-		/* turn on display hw */
-		drm_modeset_lock_all(dev);
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		if (!amdgpu_device_has_dc_support(adev)) {
+			/* pre DCE11 */
+			drm_helper_resume_force_mode(dev);
+
+			/* turn on display hw */
+			drm_modeset_lock_all(dev);
+			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+			}
+			drm_modeset_unlock_all(dev);
+		} else {
+			/*
+			 * There is no equivalent atomic helper to turn on
+			 * display, so we defined our own function for this,
+			 * once suspend resume is supported by the atomic
+			 * framework this will be reworked
+			 */
+			amdgpu_dm_display_resume(adev);
 		}
-		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
@@ -2517,7 +2569,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth++;
 #endif
-	drm_helper_hpd_irq_event(dev);
+	if (!amdgpu_device_has_dc_support(adev))
+		drm_helper_hpd_irq_event(dev);
+	else
+		drm_kms_helper_hotplug_event(dev);
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth--;
 #endif
@@ -2814,6 +2869,7 @@ give_up_reset:
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
+	struct drm_atomic_state *state = NULL;
 	int i, r;
 	int resched;
 	bool need_full_reset, vram_lost = false;
@@ -2827,6 +2883,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	/* store modesetting */
+	if (amdgpu_device_has_dc_support(adev))
+		state = drm_atomic_helper_suspend(adev->ddev);
 
 	/* block scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -2944,7 +3003,11 @@ out:
 		}
 	}
 
-	drm_helper_resume_force_mode(adev->ddev);
+	if (amdgpu_device_has_dc_support(adev)) {
+		r = drm_atomic_helper_resume(adev->ddev, state);
+		amdgpu_dm_display_resume(adev);
+	} else
+		drm_helper_resume_force_mode(adev->ddev);
 
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
 	if (r) {

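One subtlety in amdgpu_device_asic_has_dc_support() above: the module parameter amdgpu_dc is a tri-state (-1 auto, 0 off, 1 on), and the two preprocessor branches differ only in how they treat auto. With CONFIG_DRM_AMD_DC_PRE_VEGA built in, auto counts as enabled on these pre-Vega ASICs (amdgpu_dc != 0); without it, only an explicit dc=1 does (amdgpu_dc > 0). A standalone C sketch of that decision (not kernel code; the config option is passed as a plain bool here):

#include <stdbool.h>
#include <stdio.h>

static int amdgpu_dc = -1;	/* module parameter: -1 auto, 0 off, 1 on */

/* Mirrors the pre-Vega case of amdgpu_device_asic_has_dc_support(). */
static bool pre_vega_dc_enabled(bool pre_vega_support_built_in)
{
	if (pre_vega_support_built_in)
		return amdgpu_dc != 0;	/* auto (-1) counts as enabled */
	return amdgpu_dc > 0;		/* only an explicit dc=1 enables DC */
}

int main(void)
{
	printf("dc=-1, PRE_VEGA=y -> %d\n", pre_vega_dc_enabled(true));	/* 1 */
	printf("dc=-1, PRE_VEGA=n -> %d\n", pre_vega_dc_enabled(false));	/* 0 */
	return 0;
}
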
+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h

@@ -429,7 +429,7 @@ struct amdgpu_pm {
 	uint32_t                fw_version;
 	uint32_t                pcie_gen_mask;
 	uint32_t                pcie_mlw_mask;
-	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+	struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
 };
 
 #define R600_SSTU_DFLT                               0

+ 4 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -103,6 +103,7 @@ int amdgpu_vm_debug = 0;
 int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
+int amdgpu_dc = -1;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_no_evict = 0;
@@ -207,6 +208,9 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 
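With the parameter registered above, DC can be toggled without a rebuild: booting with amdgpu.dc=1 on the kernel command line (or passing dc=1 to modprobe) forces the new display engine on where it is compiled in, dc=0 forces the legacy path, and the default -1 defers to amdgpu_device_asic_has_dc_support().
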

+ 0 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -42,11 +42,6 @@
    this contains a helper + a amdgpu fb
    the helper contains a pointer to amdgpu framebuffer baseclass.
 */
-struct amdgpu_fbdev {
-	struct drm_fb_helper helper;
-	struct amdgpu_framebuffer rfb;
-	struct amdgpu_device *adev;
-};
 
 static int
 amdgpufb_open(struct fb_info *info, int user)

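struct amdgpu_fbdev is not deleted here; it moves into amdgpu_mode.h (see the amdgpu_mode.h hunk below, which also adds an fbdev_list member) so that code outside amdgpu_fb.c, including the new DM code, can reach the fbdev helper.
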
+ 19 - 10
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c

@@ -37,6 +37,10 @@
 
 #include <linux/pm_runtime.h>
 
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
 
 /*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 
 	spin_lock_init(&adev->irq.lock);
 
-	if (!adev->enable_virtual_display)
-		/* Disable vblank irqs aggressively for power-saving */
-		adev->ddev->vblank_disable_immediate = true;
-
-	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
-	if (r) {
-		return r;
-	}
-
 	/* enable msi */
 	adev->irq.msi_enabled = false;
 
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 		}
 	}
 
-	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+	if (!amdgpu_device_has_dc_support(adev)) {
+		if (!adev->enable_virtual_display)
+			/* Disable vblank irqs aggressively for power-saving */
+			/* XXX: can this be enabled for DC? */
+			adev->ddev->vblank_disable_immediate = true;
+
+		r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+		if (r)
+			return r;
+
+		/* pre DCE11 */
+		INIT_WORK(&adev->hotplug_work,
+				amdgpu_hotplug_work_func);
+	}
+
 	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
 
 	adev->irq.installed = true;

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -1034,7 +1034,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
 };
 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
 

+ 82 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

@@ -38,11 +38,15 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/hrtimer.h>
 #include "amdgpu_irq.h"
 
+#include <drm/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+
 struct amdgpu_bo;
 struct amdgpu_device;
 struct amdgpu_encoder;
@@ -292,6 +296,27 @@ struct amdgpu_display_funcs {
 			      uint16_t connector_object_id,
 			      struct amdgpu_hpd *hpd,
 			      struct amdgpu_router *router);
+	/* it is used to enter or exit into free sync mode */
+	int (*notify_freesync)(struct drm_device *dev, void *data,
+			       struct drm_file *filp);
+	/* it is used to allow enablement of freesync mode */
+	int (*set_freesync_property)(struct drm_connector *connector,
+				     struct drm_property *property,
+				     uint64_t val);
+
+
+};
+
+struct amdgpu_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct amdgpu_fbdev {
+	struct drm_fb_helper helper;
+	struct amdgpu_framebuffer rfb;
+	struct list_head fbdev_list;
+	struct amdgpu_device *adev;
 };
 
 struct amdgpu_mode_info {
@@ -400,6 +425,11 @@ struct amdgpu_crtc {
 	/* for virtual dce */
 	struct hrtimer vblank_timer;
 	enum amdgpu_interrupt_state vsync_timer_enabled;
+
+	int otg_inst;
+	uint32_t flip_flags;
+	/* After Set Mode target will be non-NULL */
+	struct dc_target *target;
 };
 
 struct amdgpu_encoder_atom_dig {
@@ -489,6 +519,19 @@ enum amdgpu_connector_dither {
 	AMDGPU_FMT_DITHER_ENABLE = 1,
 };
 
+struct amdgpu_dm_dp_aux {
+	struct drm_dp_aux aux;
+	uint32_t link_index;
+};
+
+struct amdgpu_i2c_adapter {
+	struct i2c_adapter base;
+	struct amdgpu_display_manager *dm;
+	uint32_t link_index;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
 struct amdgpu_connector {
 	struct drm_connector base;
 	uint32_t connector_id;
@@ -500,6 +543,14 @@ struct amdgpu_connector {
 	/* we need to mind the EDID between detect
 	   and get modes due to analog/digital/tvencoder */
 	struct edid *edid;
+	/* number of modes generated from EDID at 'dc_sink' */
+	int num_modes;
+	/* The 'old' sink - before an HPD.
+	 * The 'current' sink is in dc_link->sink. */
+	const struct dc_sink *dc_sink;
+	const struct dc_link *dc_link;
+	const struct dc_sink *dc_em_sink;
+	const struct dc_target *target;
 	void *con_priv;
 	bool dac_load_detect;
 	bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +561,39 @@ struct amdgpu_connector {
 	enum amdgpu_connector_audio audio;
 	enum amdgpu_connector_dither dither;
 	unsigned pixelclock_for_modeset;
+
+	struct drm_dp_mst_topology_mgr mst_mgr;
+	struct amdgpu_dm_dp_aux dm_dp_aux;
+	struct drm_dp_mst_port *port;
+	struct amdgpu_connector *mst_port;
+	struct amdgpu_encoder *mst_encoder;
+	struct semaphore mst_sem;
+
+	/* TODO see if we can merge with ddc_bus or make a dm_connector */
+	struct amdgpu_i2c_adapter *i2c;
+
+	/* Monitor range limits */
+	int min_vfreq;
+	int max_vfreq;
+	int pixel_clock_mhz;
+
+	/* freesync caps */
+	struct mod_freesync_caps caps;
+
+	struct mutex hpd_lock;
+
 };
 
-struct amdgpu_framebuffer {
-	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
+/* TODO: start to use this struct and remove same field from base one */
+struct amdgpu_mst_connector {
+	struct amdgpu_connector base;
+
+	struct drm_dp_mst_topology_mgr mst_mgr;
+	struct amdgpu_dm_dp_aux dm_dp_aux;
+	struct drm_dp_mst_port *port;
+	struct amdgpu_connector *mst_port;
+	bool is_mst_connector;
+	struct amdgpu_encoder *mst_encoder;
 };
 
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \

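TO_DM_AUX above is the standard container_of idiom: given a pointer to the embedded drm_dp_aux (what the DRM DP helpers hand back), it recovers the wrapping amdgpu_dm_dp_aux. A self-contained sketch with stand-in types, assuming only offsetof:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_dp_aux { int stub; };	/* stand-in for the real DRM type */

struct amdgpu_dm_dp_aux {
	struct drm_dp_aux aux;		/* embedded base */
	unsigned int link_index;
};

#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)

int main(void)
{
	struct amdgpu_dm_dp_aux dm_aux = { .link_index = 2 };
	struct drm_dp_aux *base = &dm_aux.aux;	/* as a DP helper would see it */

	return TO_DM_AUX(base)->link_index == 2 ? 0 : 1;	/* exits 0 */
}
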
+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -1467,7 +1467,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			list_for_each_entry(crtc,
 					    &ddev->mode_config.crtc_list, head) {
 				amdgpu_crtc = to_amdgpu_crtc(crtc);
-				if (crtc->enabled) {
+				if (amdgpu_crtc->enabled) {
 					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
 					adev->pm.dpm.new_active_crtc_count++;
 				}

+ 9 - 0
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -65,6 +65,7 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "amdgpu_dm.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_powerplay.h"
 #include "dce_virtual.h"
@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1919,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);

+ 21 - 0
drivers/gpu/drm/amd/amdgpu/vi.c

@@ -77,6 +77,7 @@
 #endif
 #include "dce_virtual.h"
 #include "mxgpu_vi.h"
+#include "amdgpu_dm.h"
 
 /*
  * Indirect registers accessor
@@ -1496,6 +1497,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1512,6 +1517,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1530,6 +1539,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1544,6 +1557,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1561,6 +1578,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);

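The pattern repeated across cik.c and vi.c above always resolves to exactly one display IP block per device, in a fixed priority order: virtual display (or SR-IOV on VI) first, then dm_ip_block when DC is compiled in and amdgpu_device_has_dc_support() agrees, and the legacy per-ASIC dce_v*_ip_block otherwise.
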
+ 28 - 0
drivers/gpu/drm/amd/display/Kconfig

@@ -0,0 +1,28 @@
+menu "Display Engine Configuration"
+	depends on DRM && DRM_AMDGPU
+
+config DRM_AMD_DC
+	bool "AMD DC - Enable new display engine"
+	default y
+	help
+	  Choose this option if you want to use the new display engine
+	  support for AMDGPU. This adds required support for Vega and
+	  Raven ASICs.
+
+config DRM_AMD_DC_PRE_VEGA
+	bool "DC support for Polaris and older ASICs"
+	default n
+	help
+	  Choose this option to enable the new DC support for older ASICs
+	  by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+	  and Hawaii.
+
+config DEBUG_KERNEL_DC
+	bool "Enable kgdb break in DC"
+	depends on DRM_AMD_DC
+	help
+	  Choose this option if you want to hit
+	  kgdb_break in assert.
+
+endmenu

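For example, a build that wants DC on Polaris and older parts by default would carry a .config fragment like this (hypothetical, but consistent with the options above):

CONFIG_DRM_AMD_DC=y
CONFIG_DRM_AMD_DC_PRE_VEGA=y
# CONFIG_DEBUG_KERNEL_DC is not set
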
+ 22 - 0
drivers/gpu/drm/amd/display/Makefile

@@ -0,0 +1,22 @@
+#
+# Makefile for the DAL (Display Abstraction Layer), which is a sub-component
+# of the AMDGPU drm driver.
+# It provides the HW control for display related functionalities.
+
+AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
+
+subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+
+#TODO: remove when Timing Sync feature is complete
+subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+DAL_LIBS = amdgpu_dm dc	modules/freesync
+
+AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+include $(AMD_DAL)

+ 17 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile

@@ -0,0 +1,17 @@
+#
+# Makefile for the 'dm' sub-component of DAL.
+# It provides the control and status of dm blocks.
+
+
+
+AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+
+ifneq ($(CONFIG_DRM_AMD_DC),)
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+
+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
+
+AMD_DISPLAY_FILES += $(AMDGPU_DM)

+ 1564 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -0,0 +1,1564 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_types.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+
+#include "modules/inc/mod_freesync.h"
+
+/* Debug facilities */
+#define AMDGPU_DM_NOT_IMPL(fmt, ...) \
+	DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__)
+
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int disp_idx - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc)
+		return 0;
+	else {
+		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+
+		if (NULL == acrtc->target) {
+			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
+			return 0;
+		}
+
+		return dc_target_get_vblank_counter(acrtc->target);
+	}
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+					u32 *vbl, u32 *position)
+{
+	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+		return -EINVAL;
+	else {
+		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+
+		if (NULL == acrtc->target) {
+			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
+			return 0;
+		}
+
+		return dc_target_get_scanoutpos(acrtc->target, vbl, position);
+	}
+
+	return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+	/* XXX todo */
+	return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+	/* XXX todo */
+	return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+	return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+	/* XXX todo */
+	return 0;
+}
+
+static struct amdgpu_crtc *get_crtc_by_otg_inst(
+	struct amdgpu_device *adev,
+	int otg_inst)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+
+	/*
+	 * The following check is inherited from both functions where this one
+	 * is now used. Why it can happen still needs to be investigated.
+	 */
+	if (otg_inst == -1) {
+		WARN_ON(1);
+		return adev->mode_info.crtcs[0];
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+		if (amdgpu_crtc->otg_inst == otg_inst)
+			return amdgpu_crtc;
+	}
+
+	return NULL;
+}
+
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+	struct amdgpu_flip_work *works;
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct common_irq_params *irq_params = interrupt_params;
+	struct amdgpu_device *adev = irq_params->adev;
+	unsigned long flags;
+
+	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+	/* IRQ could occur when in initial stage */
+	/* TODO: work and BO cleanup */
+	if (amdgpu_crtc == NULL) {
+		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+		return;
+	}
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	works = amdgpu_crtc->pflip_works;
+
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+						 amdgpu_crtc->pflip_status,
+						 AMDGPU_FLIP_SUBMITTED,
+						 amdgpu_crtc->crtc_id,
+						 amdgpu_crtc);
+		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		return;
+	}
+
+	/* page flip completed. clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+	if (works->event)
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base,
+					   works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p,\n",
+					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+	schedule_work(&works->unpin_work);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+	struct common_irq_params *irq_params = interrupt_params;
+	struct amdgpu_device *adev = irq_params->adev;
+	uint8_t crtc_index = 0;
+	struct amdgpu_crtc *acrtc;
+
+	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+	if (acrtc)
+		crtc_index = acrtc->crtc_id;
+
+	drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+		  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+		  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void* handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+	struct drm_device *dev = dm->ddev;
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+	struct dc_init_data init_data;
+	adev->dm.ddev = adev->ddev;
+	adev->dm.adev = adev;
+
+	DRM_INFO("DAL is enabled\n");
+	/* Zero all the fields */
+	memset(&init_data, 0, sizeof(init_data));
+
+	/* initialize DAL's lock (for SYNC context use) */
+	spin_lock_init(&adev->dm.dal_lock);
+
+	/* initialize DAL's mutex */
+	mutex_init(&adev->dm.dal_mutex);
+
+	if(amdgpu_dm_irq_init(adev)) {
+		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+		goto error;
+	}
+
+	init_data.asic_id.chip_family = adev->family;
+
+	init_data.asic_id.pci_revision_id = adev->rev_id;
+	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+	init_data.asic_id.vram_width = adev->mc.vram_width;
+	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
+	init_data.asic_id.atombios_base_address =
+		adev->mode_info.atom_context->bios;
+
+	init_data.driver = adev;
+
+	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+	if (!adev->dm.cgs_device) {
+		DRM_ERROR("amdgpu: failed to create cgs device.\n");
+		goto error;
+	}
+
+	init_data.cgs_device = adev->dm.cgs_device;
+
+	adev->dm.dal = NULL;
+
+	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+	/* Display Core create. */
+	adev->dm.dc = dc_create(&init_data);
+
+	if (!adev->dm.dc)
+		DRM_INFO("Display Core failed to initialize!\n");
+
+	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+
+	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+	if (!adev->dm.freesync_module) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize freesync_module.\n");
+	} else
+		DRM_INFO("amdgpu: freesync_module init done %p.\n",
+				adev->dm.freesync_module);
+
+	if (amdgpu_dm_initialize_drm_device(adev)) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize sw for display support.\n");
+		goto error;
+	}
+
+	/* Update the actual used number of crtc */
+	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+	/* TODO: Add_display_info? */
+
+	/* TODO use dynamic cursor width */
+	adev->ddev->mode_config.cursor_width = 128;
+	adev->ddev->mode_config.cursor_height = 128;
+
+	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize sw for display support.\n");
+		goto error;
+	}
+
+	DRM_INFO("KMS initialized.\n");
+
+	return 0;
+error:
+	amdgpu_dm_fini(adev);
+
+	return -1;
+}
+
+void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+	amdgpu_dm_destroy_drm_device(&adev->dm);
+	/*
+	 * TODO: pageflip, vblank interrupt
+	 *
+	 * amdgpu_dm_irq_fini(adev);
+	 */
+
+	if (adev->dm.cgs_device) {
+		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+		adev->dm.cgs_device = NULL;
+	}
+	if (adev->dm.freesync_module) {
+		mod_freesync_destroy(adev->dm.freesync_module);
+		adev->dm.freesync_module = NULL;
+	}
+	/* DC Destroy TODO: Replace destroy DAL */
+	{
+		dc_destroy(&adev->dm.dc);
+	}
+	return;
+}
+
+/* moved from amdgpu_dm_kms.c */
+void amdgpu_dm_destroy(void)
+{
+}
+
+static int dm_sw_init(void *handle)
+{
+	return 0;
+}
+
+static int dm_sw_fini(void *handle)
+{
+	return 0;
+}
+
+static void detect_link_for_all_connectors(struct drm_device *dev)
+{
+	struct amdgpu_connector *aconnector;
+	struct drm_connector *connector;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		   aconnector = to_amdgpu_connector(connector);
+		   if (aconnector->dc_link->type == dc_connection_mst_branch) {
+			   DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+						aconnector, aconnector->base.base.id);
+
+				if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) {
+					DRM_ERROR("DM_MST: Failed to start MST\n");
+					((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
+				}
+		   }
+	}
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+	struct amdgpu_connector *aconnector;
+	struct drm_connector *connector;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		   aconnector = to_amdgpu_connector(connector);
+		   if (aconnector->dc_link->type == dc_connection_mst_branch &&
+				   !aconnector->mst_port) {
+
+			   if (suspend)
+				   drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+			   else
+				   drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+		   }
+	}
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static int dm_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	/* Create DAL display manager */
+	amdgpu_dm_init(adev);
+
+	amdgpu_dm_hpd_init(adev);
+
+	detect_link_for_all_connectors(adev->ddev);
+
+	return 0;
+}
+
+static int dm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_dm_hpd_fini(adev);
+
+	amdgpu_dm_irq_fini(adev);
+
+	return 0;
+}
+
+static int dm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = handle;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	int ret = 0;
+	struct drm_crtc *crtc;
+
+	s3_handle_mst(adev->ddev, true);
+
+	/* Flush all pending vblank events and turn interrupts off
+	 * before disabling CRTCs. They will be re-enabled in
+	 * dm_display_resume
+	 */
+	drm_modeset_lock_all(adev->ddev);
+	list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+		if (acrtc->target)
+				drm_crtc_vblank_off(crtc);
+	}
+	drm_modeset_unlock_all(adev->ddev);
+
+	amdgpu_dm_irq_suspend(adev);
+
+	dc_set_power_state(
+		dm->dc,
+		DC_ACPI_CM_POWER_STATE_D3,
+		DC_VIDEO_POWER_SUSPEND);
+
+	return ret;
+}
+
+struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
+	struct drm_atomic_state *state,
+	struct drm_crtc *crtc,
+	bool from_state_var)
+{
+	uint32_t i;
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc_from_state;
+
+	for_each_connector_in_state(
+		state,
+		connector,
+		conn_state,
+		i) {
+		crtc_from_state =
+			from_state_var ?
+				conn_state->crtc :
+				connector->state->crtc;
+
+		if (crtc_from_state == crtc)
+			return to_amdgpu_connector(connector);
+	}
+
+	return NULL;
+}
+
+static int dm_display_resume(struct drm_device *ddev)
+{
+	int ret = 0;
+	struct drm_connector *connector;
+
+	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct amdgpu_connector *aconnector;
+	struct drm_connector_state *conn_state;
+
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+	/* Construct an atomic state to restore previous display setting */
+
+	/*
+	 * Attach connectors to drm_atomic_state
+	 * Should be done in the first place in order to make connectors
+	 * available in state during crtc state processing. It is used for
+	 * making decision if crtc should be disabled in case sink got
+	 * disconnected.
+	 *
+	 * A connector-state crtc with a NULL dc_sink should be cleared, because it
+	 * will fail validation during commit
+	 */
+	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_connector(connector);
+		conn_state = drm_atomic_get_connector_state(state, connector);
+
+		ret = PTR_ERR_OR_ZERO(conn_state);
+		if (ret)
+			goto err;
+	}
+
+	/* Attach crtcs to drm_atomic_state*/
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto err;
+
+		/* force a restore */
+		crtc_state->mode_changed = true;
+	}
+
+
+	/* Attach planes to drm_atomic_state */
+	list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {
+
+		struct drm_crtc *crtc;
+		struct drm_gem_object *obj;
+		struct drm_framebuffer *fb;
+		struct amdgpu_framebuffer *afb;
+		struct amdgpu_bo *rbo;
+		int r;
+		struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);
+
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (ret)
+			goto err;
+
+		crtc = plane_state->crtc;
+		fb = plane_state->fb;
+
+		if (!crtc || !crtc->state || !crtc->state->active)
+			continue;
+
+		if (!fb) {
+			DRM_DEBUG_KMS("No FB bound\n");
+			return 0;
+		}
+
+		/*
+		 * Pin back the front buffers, cursor buffer was already pinned
+		 * back in amdgpu_resume_kms
+		 */
+
+		afb = to_amdgpu_framebuffer(fb);
+
+		obj = afb->obj;
+		rbo = gem_to_amdgpu_bo(obj);
+		r = amdgpu_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+		       return r;
+
+		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
+
+		amdgpu_bo_unreserve(rbo);
+
+		if (unlikely(r != 0)) {
+			DRM_ERROR("Failed to pin framebuffer\n");
+			return r;
+		}
+
+	}
+
+
+	/* Call commit internally with the state we just constructed */
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return 0;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_put(state);
+
+	return ret;
+}
+
+static int dm_resume(void *handle)
+{
+	struct amdgpu_device *adev = handle;
+	struct amdgpu_display_manager *dm = &adev->dm;
+
+	/* power on hardware */
+	dc_set_power_state(
+		dm->dc,
+		DC_ACPI_CM_POWER_STATE_D0,
+		DC_VIDEO_POWER_ON);
+
+	return 0;
+}
+
+int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct amdgpu_connector *aconnector;
+	struct drm_connector *connector;
+	int ret = 0;
+	struct drm_crtc *crtc;
+
+	/* program HPD filter */
+	dc_resume(dm->dc);
+
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+	s3_handle_mst(ddev, false);
+
+	/*
+	 * early enable HPD Rx IRQ, should be done before set mode as short
+	 * pulse interrupts are used for MST
+	 */
+	amdgpu_dm_irq_resume_early(adev);
+
+	drm_modeset_lock_all(ddev);
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+		if (acrtc->target)
+				drm_crtc_vblank_on(crtc);
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* Do detection */
+	list_for_each_entry(connector,
+			&ddev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_connector(connector);
+
+		/*
+		 * this is the case when traversing through already created
+		 * MST connectors, should be skipped
+		 */
+		if (aconnector->mst_port)
+			continue;
+
+		dc_link_detect(aconnector->dc_link, false);
+		aconnector->dc_sink = NULL;
+		amdgpu_dm_update_connector_after_detect(aconnector);
+	}
+
+	drm_modeset_lock_all(ddev);
+	ret = dm_display_resume(ddev);
+	drm_modeset_unlock_all(ddev);
+
+	amdgpu_dm_irq_resume(adev);
+
+	return ret;
+}
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+	.name = "dm",
+	.early_init = dm_early_init,
+	.late_init = NULL,
+	.sw_init = dm_sw_init,
+	.sw_fini = dm_sw_fini,
+	.hw_init = dm_hw_init,
+	.hw_fini = dm_hw_fini,
+	.suspend = dm_suspend,
+	.resume = dm_resume,
+	.is_idle = dm_is_idle,
+	.wait_for_idle = dm_wait_for_idle,
+	.check_soft_reset = dm_check_soft_reset,
+	.soft_reset = dm_soft_reset,
+	.set_clockgating_state = dm_set_clockgating_state,
+	.set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &amdgpu_dm_funcs,
+};
+
+/* TODO: this is temporarily non-const; it should be fixed later */
+static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+	.atomic_check = amdgpu_dm_atomic_check,
+	.atomic_commit = amdgpu_dm_atomic_commit
+};
+
+void amdgpu_dm_update_connector_after_detect(
+	struct amdgpu_connector *aconnector)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	const struct dc_sink *sink;
+
+	/* MST handled by drm_mst framework */
+	if (aconnector->mst_mgr.mst_state == true)
+		return;
+
+
+	sink = aconnector->dc_link->local_sink;
+
+	/* An EDID-managed connector gets its first update only in the mode_valid
+	 * hook, after which the connector sink is set to either the fake or the
+	 * physical sink depending on link status. Don't do it here during boot.
+	 */
+	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+			&& aconnector->dc_em_sink) {
+
+		/* For headless S3 resume, use the emulated sink to fake a target,
+		 * because on resume connector->sink is set to NULL
+		 */
+		mutex_lock(&dev->mode_config.mutex);
+
+		if (sink) {
+			if (aconnector->dc_sink)
+				amdgpu_dm_remove_sink_from_freesync_module(
+								connector);
+			aconnector->dc_sink = sink;
+			amdgpu_dm_add_sink_to_freesync_module(
+						connector, aconnector->edid);
+		} else {
+			amdgpu_dm_remove_sink_from_freesync_module(connector);
+			if (!aconnector->dc_sink)
+				aconnector->dc_sink = aconnector->dc_em_sink;
+		}
+
+		mutex_unlock(&dev->mode_config.mutex);
+		return;
+	}
+
+	/*
+	 * TODO: temporary guard to look for proper fix
+	 * if this sink is MST sink, we should not do anything
+	 */
+	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		return;
+
+	if (aconnector->dc_sink == sink) {
+		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
+		 * Do nothing!! */
+		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+				aconnector->connector_id);
+		return;
+	}
+
+	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+		aconnector->connector_id, aconnector->dc_sink, sink);
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/* 1. Update status of the drm connector
+	 * 2. Send an event and let userspace tell us what to do */
+	if (sink) {
+		/* TODO: check if we still need the S3 mode update workaround.
+		 * If yes, put it here. */
+		if (aconnector->dc_sink)
+			amdgpu_dm_remove_sink_from_freesync_module(
+							connector);
+
+		aconnector->dc_sink = sink;
+		if (sink->dc_edid.length == 0)
+			aconnector->edid = NULL;
+		else {
+			aconnector->edid =
+				(struct edid *) sink->dc_edid.raw_edid;
+
+
+			drm_mode_connector_update_edid_property(connector,
+					aconnector->edid);
+		}
+		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
+
+	} else {
+		amdgpu_dm_remove_sink_from_freesync_module(connector);
+		drm_mode_connector_update_edid_property(connector, NULL);
+		aconnector->num_modes = 0;
+		aconnector->dc_sink = NULL;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void handle_hpd_irq(void *param)
+{
+	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+
+	/* In case of failure or MST there is no need to update the connector
+	 * status or notify the OS, since (in the MST case) MST does this in
+	 * its own context.
+	 */
+	mutex_lock(&aconnector->hpd_lock);
+	if (dc_link_detect(aconnector->dc_link, false)) {
+		amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_hotplug_event(dev);
+	}
+	mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	int dpcd_bytes_to_read;
+
+	const int max_process_count = 30;
+	int process_count = 0;
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	dret = drm_dp_dpcd_read(
+		&aconnector->dm_dp_aux.aux,
+		dpcd_addr,
+		esi,
+		dpcd_bytes_to_read);
+
+	while (dret == dpcd_bytes_to_read &&
+		process_count < max_process_count) {
+		uint8_t retry;
+		dret = 0;
+
+		process_count++;
+
+		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+		/* handle HPD short pulse irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq(
+				&aconnector->mst_mgr,
+				esi,
+				&new_irq_handled);
+#endif
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			const int ack_dpcd_bytes_to_write =
+				dpcd_bytes_to_read - 1;
+
+			for (retry = 0; retry < 3; retry++) {
+				uint8_t wret;
+
+				wret = drm_dp_dpcd_write(
+					&aconnector->dm_dp_aux.aux,
+					dpcd_addr + 1,
+					&esi[1],
+					ack_dpcd_bytes_to_write);
+				if (wret == ack_dpcd_bytes_to_write)
+					break;
+			}
+
+			/* check if there is a new irq to be handled */
+			dret = drm_dp_dpcd_read(
+				&aconnector->dm_dp_aux.aux,
+				dpcd_addr,
+				esi,
+				dpcd_bytes_to_read);
+
+			new_irq_handled = false;
+		} else
+			break;
+	}
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	const struct dc_link *dc_link = aconnector->dc_link;
+	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+	/* TODO: Temporarily add a mutex so the HPD interrupt does not hit a
+	 * GPIO conflict; once the i2c helper is implemented, this mutex
+	 * should be retired.
+	 */
+	if (aconnector->dc_link->type != dc_connection_mst_branch)
+		mutex_lock(&aconnector->hpd_lock);
+
+	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
+			!is_mst_root_connector) {
+		/* Downstream Port status changed. */
+		if (dc_link_detect(aconnector->dc_link, false)) {
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
+
+			drm_kms_helper_hotplug_event(dev);
+		}
+	}
+	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+				(dc_link->type == dc_connection_mst_branch))
+		dm_handle_hpd_rx_irq(aconnector);
+
+	if (aconnector->dc_link->type != dc_connection_mst_branch)
+		mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+	struct amdgpu_connector *aconnector;
+	const struct dc_link *dc_link;
+	struct dc_interrupt_params int_params = {0};
+
+	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+	list_for_each_entry(connector,
+			&dev->mode_config.connector_list, head)	{
+
+		aconnector = to_amdgpu_connector(connector);
+		dc_link = aconnector->dc_link;
+
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+			int_params.irq_source = dc_link->irq_source_hpd;
+
+			amdgpu_dm_irq_register_interrupt(adev, &int_params,
+					handle_hpd_irq,
+					(void *) aconnector);
+		}
+
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+
+			/* Also register for DP short pulse (hpd_rx). */
+			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+			int_params.irq_source =	dc_link->irq_source_hpd_rx;
+
+			amdgpu_dm_irq_register_interrupt(adev, &int_params,
+					handle_hpd_rx_irq,
+					(void *) aconnector);
+		}
+	}
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+	struct dc *dc = adev->dm.dc;
+	struct common_irq_params *c_irq_params;
+	struct dc_interrupt_params int_params = {0};
+	int r;
+	int i;
+
+	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+	/* Actions of amdgpu_irq_add_id():
+	 * 1. Register a set() function with base driver.
+	 *    Base driver will call set() function to enable/disable an
+	 *    interrupt in DC hardware.
+	 * 2. Register amdgpu_dm_irq_handler().
+	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+	 *    coming from DC hardware.
+	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+	 *    for acknowledging and handling. */
+
+	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
+			i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
+		if (r) {
+			DRM_ERROR("Failed to add crtc irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
+		amdgpu_dm_irq_register_interrupt(adev, &int_params,
+				dm_crtc_high_irq, c_irq_params);
+	}
+
+	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+		if (r) {
+			DRM_ERROR("Failed to add page flip irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
+		amdgpu_dm_irq_register_interrupt(adev, &int_params,
+				dm_pflip_high_irq, c_irq_params);
+	}
+
+	/* HPD */
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
+			&adev->hpd_irq);
+	if (r) {
+		DRM_ERROR("Failed to add hpd irq id!\n");
+		return r;
+	}
+
+	register_hpd_handlers(adev);
+
+	return 0;
+}
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	adev->mode_info.mode_config_initialized = true;
+
+	amdgpu_dm_mode_funcs.fb_create =
+		amdgpu_mode_funcs.fb_create;
+	amdgpu_dm_mode_funcs.output_poll_changed =
+		amdgpu_mode_funcs.output_poll_changed;
+
+	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+
+	adev->ddev->mode_config.preferred_depth = 24;
+	adev->ddev->mode_config.prefer_shadow = 1;
+	/* indicate support of immediate flip */
+	adev->ddev->mode_config.async_page_flip = true;
+
+	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+	r = amdgpu_modeset_create_props(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+	struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+	if (dc_link_set_backlight_level(dm->backlight_link,
+			bd->props.brightness, 0, 0))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+	return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+	.get_brightness = amdgpu_dm_backlight_get_brightness,
+	.update_status	= amdgpu_dm_backlight_update_status,
+};
+
+void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+{
+	char bl_name[16];
+	struct backlight_properties props = { 0 };
+
+	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+
+	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+			dm->adev->ddev->primary->index);
+
+	dm->backlight_dev = backlight_device_register(bl_name,
+			dm->adev->ddev->dev,
+			dm,
+			&amdgpu_dm_backlight_ops,
+			&props);
+
+	if (IS_ERR(dm->backlight_dev))
+		DRM_ERROR("DM: Backlight registration failed!\n");
+	else
+		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_display_manager *dm = &adev->dm;
+	uint32_t i;
+	struct amdgpu_connector *aconnector;
+	struct amdgpu_encoder *aencoder;
+	struct amdgpu_crtc *acrtc;
+	uint32_t link_cnt;
+
+	link_cnt = dm->dc->caps.max_links;
+
+	if (amdgpu_dm_mode_config_init(dm->adev)) {
+		DRM_ERROR("DM: Failed to initialize mode config\n");
+		return -1;
+	}
+
+	for (i = 0; i < dm->dc->caps.max_targets; i++) {
+		acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+		if (!acrtc)
+			goto fail;
+
+		if (amdgpu_dm_crtc_init(
+			dm,
+			acrtc,
+			i)) {
+			DRM_ERROR("KMS: Failed to initialize crtc\n");
+			kfree(acrtc);
+			goto fail;
+		}
+	}
+
+	dm->display_indexes_num = dm->dc->caps.max_targets;
+
+	/* loops over all connectors on the board */
+	for (i = 0; i < link_cnt; i++) {
+
+		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+			DRM_ERROR(
+				"KMS: Cannot support more than %d display indexes\n",
+					AMDGPU_DM_MAX_DISPLAY_INDEX);
+			continue;
+		}
+
+		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+		if (!aconnector)
+			goto fail;
+
+		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+		if (!aencoder)
+			goto fail_free_connector;
+
+		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+			DRM_ERROR("KMS: Failed to initialize encoder\n");
+			goto fail_free_encoder;
+		}
+
+		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+			DRM_ERROR("KMS: Failed to initialize connector\n");
+			goto fail_free_connector;
+		}
+
+		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
+			amdgpu_dm_update_connector_after_detect(aconnector);
+	}
+
+	/* Software is initialized. Now we can register interrupt handlers. */
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		if (dce110_register_irq_handlers(dm->adev)) {
+			DRM_ERROR("DM: Failed to initialize IRQ\n");
+			return -1;
+		}
+		break;
+	default:
+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+		return -1;
+	}
+
+	drm_mode_config_reset(dm->ddev);
+
+	return 0;
+fail_free_encoder:
+	kfree(aencoder);
+fail_free_connector:
+	kfree(aconnector);
+fail:
+	return -1;
+}
+
+void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+	drm_mode_config_cleanup(dm->ddev);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
+}
+
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+				     u8 level)
+{
+	/* TODO: translate amdgpu_encoder to display_index and call DAL */
+	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
+}
+
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+	/* TODO: translate amdgpu_encoder to display_index and call DAL */
+	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
+	return 0;
+}
+
+/******************************************************************************
+ * Page Flip functions
+ ******************************************************************************/
+
+/**
+ * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
+ *			via DRM IOCTL from user mode.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to page flip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (surface address update).
+ */
+static void dm_page_flip(struct amdgpu_device *adev,
+			 int crtc_id, u64 crtc_base, bool async)
+{
+	struct amdgpu_crtc *acrtc;
+	struct dc_target *target;
+	struct dc_flip_addrs addr = { {0} };
+
+	/*
+	 * TODO risk of concurrency issues
+	 *
+	 * This should be guarded by the dal_mutex but we can't do this since the
+	 * caller uses a spin_lock on event_lock.
+	 *
+	 * If we wait on the dal_mutex a second page flip interrupt might come,
+	 * spin on the event_lock, disabling interrupts while it does so. At
+	 * this point the core can no longer be pre-empted and return to the
+	 * thread that waited on the dal_mutex and we're deadlocked.
+	 *
+	 * With multiple cores the same essentially happens but might just take
+	 * a little longer to lock up all cores.
+	 *
+	 * The reason we should lock on dal_mutex is so that we can be sure
+	 * nobody messes with acrtc->target after we read and check its value.
+	 *
+	 * We might be able to fix our concurrency issues with a work queue
+	 * where we schedule all work items (mode_set, page_flip, etc.) and
+	 * execute them one by one. Care needs to be taken to still deal with
+	 * any potential concurrency issues arising from interrupt calls.
+	 */
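+	/*
+	 * A minimal sketch of the work-queue idea above (illustrative only;
+	 * dm_flip_work and dm_serial_wq are hypothetical names):
+	 *
+	 *	struct dm_flip_work {
+	 *		struct work_struct work;
+	 *		struct amdgpu_device *adev;
+	 *		int crtc_id;
+	 *		u64 crtc_base;
+	 *		bool async;
+	 *	};
+	 *
+	 *	// dm_page_flip() would only package its arguments and queue
+	 *	// them on a single-threaded workqueue, so flips and mode sets
+	 *	// execute strictly one at a time:
+	 *	queue_work(dm_serial_wq, &flip_work->work);
+	 */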
+
+	acrtc = adev->mode_info.crtcs[crtc_id];
+	target = acrtc->target;
+
+	/*
+	 * Received a page flip call after the display has been reset.
+	 * Just return in this case. Everything should be cleaned up on reset.
+	 */
+
+	if (!target) {
+		WARN_ON(1);
+		return;
+	}
+
+	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
+	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
+	addr.flip_immediate = async;
+
+	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+			 __func__,
+			 addr.address.grph.addr.high_part,
+			 addr.address.grph.addr.low_part);
+
+	dc_flip_surface_addrs(
+			adev->dm.dc,
+			dc_target_get_status(target)->surfaces,
+			&addr, 1);
+}
+
+static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct mod_freesync_params freesync_params;
+	uint8_t num_targets;
+	uint8_t i;
+	struct dc_target *target;
+
+	struct amdgpu_device *adev = dev->dev_private;
+	int r = 0;
+
+	/* Get freesync enable flag from DRM */
+
+	num_targets = dc_get_current_target_count(adev->dm.dc);
+
+	for (i = 0; i < num_targets; i++) {
+
+		target = dc_get_target_at_index(adev->dm.dc, i);
+
+		mod_freesync_update_state(adev->dm.freesync_module,
+						target->streams,
+						target->stream_count,
+						&freesync_params);
+	}
+
+	return r;
+}
+
+#ifdef CONFIG_DRM_AMDGPU_CIK
+static const struct amdgpu_display_funcs dm_dce_v8_0_display_funcs = {
+	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+	.vblank_wait = NULL,
+	.backlight_set_level =
+		dm_set_backlight_level,/* called unconditionally */
+	.backlight_get_level =
+		dm_get_backlight_level,/* called unconditionally */
+	.hpd_sense = NULL,/* called unconditionally */
+	.hpd_set_polarity = NULL, /* called unconditionally */
+	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+	.page_flip = dm_page_flip, /* called unconditionally */
+	.page_flip_get_scanoutpos =
+		dm_crtc_get_scanoutpos,/* called unconditionally */
+	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
+	.notify_freesync = amdgpu_notify_freesync,
+};
+#endif
+
+static const struct amdgpu_display_funcs dm_dce_v10_0_display_funcs = {
+	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+	.vblank_wait = NULL,
+	.backlight_set_level =
+		dm_set_backlight_level,/* called unconditionally */
+	.backlight_get_level =
+		dm_get_backlight_level,/* called unconditionally */
+	.hpd_sense = NULL,/* called unconditionally */
+	.hpd_set_polarity = NULL, /* called unconditionally */
+	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+	.page_flip = dm_page_flip, /* called unconditionally */
+	.page_flip_get_scanoutpos =
+		dm_crtc_get_scanoutpos,/* called unconditionally */
+	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
+	.notify_freesync = amdgpu_notify_freesync,
+};
+
+static const struct amdgpu_display_funcs dm_dce_v11_0_display_funcs = {
+	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+	.vblank_wait = NULL,
+	.backlight_set_level =
+		dm_set_backlight_level,/* called unconditionally */
+	.backlight_get_level =
+		dm_get_backlight_level,/* called unconditionally */
+	.hpd_sense = NULL,/* called unconditionally */
+	.hpd_set_polarity = NULL, /* called unconditionally */
+	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+	.page_flip = dm_page_flip, /* called unconditionally */
+	.page_flip_get_scanoutpos =
+		dm_crtc_get_scanoutpos,/* called unconditionally */
+	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
+	.notify_freesync = amdgpu_notify_freesync,
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(
+	struct device *device,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int ret;
+	int s3_state;
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = drm_dev->dev_private;
+
+	ret = kstrtoint(buf, 0, &s3_state);
+
+	if (ret == 0) {
+		if (s3_state) {
+			dm_resume(adev);
+			amdgpu_dm_display_resume(adev);
+			drm_kms_helper_hotplug_event(adev->ddev);
+		} else
+			dm_suspend(adev);
+	}
+
+	return ret == 0 ? count : ret;
+}
+
+DEVICE_ATTR_WO(s3_debug);
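+
+/* Illustrative usage from user space (the exact path is an assumption; the
+ * attribute is created on the PCI device in dm_early_init() below):
+ *	echo 0 > /sys/bus/pci/devices/<domain:bus:dev.fn>/s3_debug   # suspend
+ *	echo 1 > /sys/bus/pci/devices/<domain:bus:dev.fn>/s3_debug   # resume
+ */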
+
+#endif
+
+static int dm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_dm_set_irq_funcs(adev);
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+#ifdef CONFIG_DRM_AMDGPU_CIK
+		if (adev->mode_info.funcs == NULL)
+			adev->mode_info.funcs = &dm_dce_v8_0_display_funcs;
+#endif
+		break;
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 7;
+		if (adev->mode_info.funcs == NULL)
+			adev->mode_info.funcs = &dm_dce_v10_0_display_funcs;
+		break;
+	case CHIP_CARRIZO:
+		adev->mode_info.num_crtc = 3;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 9;
+		if (adev->mode_info.funcs == NULL)
+			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
+		break;
+	case CHIP_STONEY:
+		adev->mode_info.num_crtc = 2;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 9;
+		if (adev->mode_info.funcs == NULL)
+			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
+		break;
+	case CHIP_POLARIS11:
+		adev->mode_info.num_crtc = 5;
+		adev->mode_info.num_hpd = 5;
+		adev->mode_info.num_dig = 5;
+		if (adev->mode_info.funcs == NULL)
+			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
+		break;
+	case CHIP_POLARIS10:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		if (adev->mode_info.funcs == NULL)
+			adev->mode_info.funcs = &dm_dce_v11_0_display_funcs;
+		break;
+	default:
+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+		return -EINVAL;
+	}
+
+	/* Note: Do NOT change adev->audio_endpt_rreg and
+	 * adev->audio_endpt_wreg because they are initialised in
+	 * amdgpu_device_init() */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+	device_create_file(
+		adev->ddev->dev,
+		&dev_attr_s3_debug);
+#endif
+
+	return 0;
+}
+
+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
+{
+	/* TODO */
+	return true;
+}
+
+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
+{
+	/* TODO */
+	return true;
+}
+
+

+ 171 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+/*
+ * This file contains the definition for amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display related functionality
+ * and this is the only component that calls DAL API.
+ * The API contained here is intended for amdgpu driver use.
+ * The API that is called directly from KMS framework is located
+ * in amdgpu_dm_kms.h file
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+
+#include "irq_types.h"
+#include "signal_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+struct amdgpu_dm_prev_state {
+	struct drm_framebuffer *fb;
+	int32_t x;
+	int32_t y;
+	struct drm_display_mode mode;
+};
+
+struct common_irq_params {
+	struct amdgpu_device *adev;
+	enum dc_irq_source irq_src;
+};
+
+struct irq_list_head {
+	struct list_head head;
+	/* In case this interrupt needs post-processing, 'work' will be queued. */
+	struct work_struct work;
+};
+
+struct amdgpu_display_manager {
+	struct dal *dal;
+	struct dc *dc;
+	struct cgs_device *cgs_device;
+	/* lock to be used when DAL is called from SYNC IRQ context */
+	spinlock_t dal_lock;
+
+	struct amdgpu_device *adev;	/*AMD base driver*/
+	struct drm_device *ddev;	/*DRM base driver*/
+	u16 display_indexes_num;
+
+	struct amdgpu_dm_prev_state prev_state;
+
+	/*
+	 * 'irq_source_handler_table' holds a list of handlers
+	 * per (DAL) IRQ source.
+	 *
+	 * Each IRQ source may need to be handled at different contexts.
+	 * By 'context' we mean, for example:
+	 * - The ISR context, which is the direct interrupt handler.
+	 * - The 'deferred' context - this is the post-processing of the
+	 *	interrupt, but at a lower priority.
+	 *
+	 * Note that handlers are called in the same order as they were
+	 * registered (FIFO).
+	 */
+	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+	struct common_irq_params
+	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+	struct common_irq_params
+	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
+
+	/* this spin lock synchronizes access to 'irq_handler_list_table' */
+	spinlock_t irq_handler_list_table_lock;
+
+	/* Timer-related data. */
+	struct list_head timer_handler_list;
+	struct workqueue_struct *timer_workqueue;
+
+	/* Use dal_mutex for any activity which is NOT synchronized by
+	 * DRM mode setting locks.
+	 * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+	 * DRM mode setting locks being acquired. This is where dal_mutex
+	 * is acquired before calling into DAL. */
+	struct mutex dal_mutex;
+
+	struct backlight_device *backlight_dev;
+
+	const struct dc_link *backlight_link;
+
+	struct work_struct mst_hotplug_work;
+
+	struct mod_freesync *freesync_module;
+};
+
+/* basic init/fini API */
+int amdgpu_dm_init(struct amdgpu_device *adev);
+
+void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+void amdgpu_dm_destroy(void);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_initialize_drm_device(
+	struct amdgpu_device *adev);
+
+/* removes and deallocates the drm structures, created by the above function */
+void amdgpu_dm_destroy_drm_device(
+	struct amdgpu_display_manager *dm);
+
+/* Locking/Mutex */
+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
+
+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
+
+/* Register "Backlight device" accessible by user-mode. */
+void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+void amdgpu_dm_update_connector_after_detect(
+	struct amdgpu_connector *aconnector);
+
+struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
+	struct drm_atomic_state *state,
+	struct drm_crtc *crtc,
+	bool from_state_var);
+
+#endif /* __AMDGPU_DM_H__ */

+ 484 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c

@@ -0,0 +1,484 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_edid.h>
+
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "dc.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_types.h"
+
+#include "dm_helpers.h"
+
+/* dm_helpers_parse_edid_caps
+ *
+ * Parse edid caps
+ *
+ * @edid:	[in] pointer to edid
+ * @edid_caps:	[out] pointer to edid caps to be filled in
+ * @return
+ *	enum dc_edid_status result
+ */
+enum dc_edid_status dm_helpers_parse_edid_caps(
+		struct dc_context *ctx,
+		const struct dc_edid *edid,
+		struct dc_edid_caps *edid_caps)
+{
+	struct edid *edid_buf = (struct edid *) edid->raw_edid;
+	struct cea_sad *sads;
+	int sad_count = -1;
+	int sadb_count = -1;
+	int i = 0;
+	int j = 0;
+	uint8_t *sadb = NULL;
+
+	enum dc_edid_status result = EDID_OK;
+
+	if (!edid_caps || !edid)
+		return EDID_BAD_INPUT;
+
+	if (!drm_edid_is_valid(edid_buf))
+		result = EDID_BAD_CHECKSUM;
+
+	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
+					((uint16_t) edid_buf->mfg_id[1])<<8;
+	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
+					((uint16_t) edid_buf->prod_code[1])<<8;
+	edid_caps->serial_number = edid_buf->serial;
+	edid_caps->manufacture_week = edid_buf->mfg_week;
+	edid_caps->manufacture_year = edid_buf->mfg_year;
+
+	/* One of the four detailed_timings stores the monitor name. It's
+	 * stored in an array of length 13. */
+	for (i = 0; i < 4; i++) {
+		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
+			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
+				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
+					break;
+
+				edid_caps->display_name[j] =
+					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
+				j++;
+			}
+		}
+	}
+
+	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
+			(struct edid *) edid->raw_edid);
+
+	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+	if (sad_count <= 0) {
+		DRM_INFO("SAD count is %d, no audio modes to parse\n",
+				sad_count);
+		return result;
+	}
+
+	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
+	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
+		struct cea_sad *sad = &sads[i];
+
+		edid_caps->audio_modes[i].format_code = sad->format;
+		edid_caps->audio_modes[i].channel_count = sad->channels;
+		edid_caps->audio_modes[i].sample_rate = sad->freq;
+		edid_caps->audio_modes[i].sample_size = sad->byte2;
+	}
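+
+	/* For reference, an LPCM SAD decodes here to format_code 1 (PCM),
+	 * with byte2 carrying the supported sample sizes (16/20/24 bit)
+	 * as a bitmask, per CEA-861. */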
+
+	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
+
+	if (sadb_count < 0) {
+		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
+		sadb_count = 0;
+	}
+
+	if (sadb_count)
+		edid_caps->speaker_flags = sadb[0];
+	else
+		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+	kfree(sads);
+	kfree(sadb);
+
+	return result;
+}
+
+static struct amdgpu_connector *get_connector_for_sink(
+	struct drm_device *dev,
+	const struct dc_sink *sink)
+{
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+		if (aconnector->dc_sink == sink)
+			return aconnector;
+	}
+
+	return NULL;
+}
+
+static struct amdgpu_connector *get_connector_for_link(
+	struct drm_device *dev,
+	const struct dc_link *link)
+{
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+		if (aconnector->dc_link == link)
+			return aconnector;
+	}
+
+	return NULL;
+}
+
+static void get_payload_table(
+		struct amdgpu_connector *aconnector,
+		struct dp_mst_stream_allocation_table *proposed_table)
+{
+	int i;
+	struct drm_dp_mst_topology_mgr *mst_mgr =
+			&aconnector->mst_port->mst_mgr;
+
+	mutex_lock(&mst_mgr->payload_lock);
+
+	proposed_table->stream_count = 0;
+
+	/* number of active streams */
+	for (i = 0; i < mst_mgr->max_payloads; i++) {
+		if (mst_mgr->payloads[i].num_slots == 0)
+			break; /* end of vcp_id table */
+
+		ASSERT(mst_mgr->payloads[i].payload_state !=
+				DP_PAYLOAD_DELETE_LOCAL);
+
+		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
+			mst_mgr->payloads[i].payload_state ==
+					DP_PAYLOAD_REMOTE) {
+
+			struct dp_mst_stream_allocation *sa =
+					&proposed_table->stream_allocations[
+						proposed_table->stream_count];
+
+			sa->slot_count = mst_mgr->payloads[i].num_slots;
+			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
+			proposed_table->stream_count++;
+		}
+	}
+
+	mutex_unlock(&mst_mgr->payload_lock);
+}
+
+/*
+ * Writes the payload allocation table to the immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+		struct dc_context *ctx,
+		const struct dc_stream *stream,
+		struct dp_mst_stream_allocation_table *proposed_table,
+		bool enable)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	struct drm_dp_mst_port *mst_port;
+	int slots = 0;
+	bool ret;
+	int clock;
+	int bpp = 0;
+	int pbn = 0;
+
+	aconnector = get_connector_for_sink(dev, stream->sink);
+
+	if (!aconnector || !aconnector->mst_port)
+		return false;
+
+	mst_mgr = &aconnector->mst_port->mst_mgr;
+
+	if (!mst_mgr->mst_state)
+		return false;
+
+	mst_port = aconnector->port;
+
+	if (enable) {
+		clock = stream->timing.pix_clk_khz;
+
+		switch (stream->timing.display_color_depth) {
+
+		case COLOR_DEPTH_666:
+			bpp = 6;
+			break;
+		case COLOR_DEPTH_888:
+			bpp = 8;
+			break;
+		case COLOR_DEPTH_101010:
+			bpp = 10;
+			break;
+		case COLOR_DEPTH_121212:
+			bpp = 12;
+			break;
+		case COLOR_DEPTH_141414:
+			bpp = 14;
+			break;
+		case COLOR_DEPTH_161616:
+			bpp = 16;
+			break;
+		default:
+			ASSERT(bpp != 0);
+			break;
+		}
+
+		bpp = bpp * 3;
+
+		/* TODO need to know link rate */
+
+		pbn = drm_dp_calc_pbn_mode(clock, bpp);
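+		/* Rough sanity example, assuming the usual PBN definition of
+		 * 54/64 MBps units plus a 0.6% margin: a 148.5 MHz pixel
+		 * clock at 24 bpp works out to about 532 PBN. Note bpp here
+		 * is bits per pixel, i.e. 3x the per-component depth from
+		 * the switch above. */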
+
+		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
+		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+
+		if (!ret)
+			return false;
+
+	} else {
+		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
+	}
+
+	ret = drm_dp_update_payload_part1(mst_mgr);
+
+	/* mst_mgr->payloads holds the VC payloads that were announced to the
+	 * MST branch via DPCD/AUX messages. Slots 1-63 are allocated in
+	 * sequence per stream, and the AMD ASIC stream slot allocation must
+	 * follow the same sequence, so copy the DRM MST allocation to dc. */
+
+	get_payload_table(aconnector, proposed_table);
+
+	if (ret)
+		return false;
+
+	return true;
+}
+
+/*
+ * Polls for ACT (allocation change trigger) handled and sends
+ * ALLOCATE_PAYLOAD message.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+		struct dc_context *ctx,
+		const struct dc_stream *stream)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	int ret;
+
+	aconnector = get_connector_for_sink(dev, stream->sink);
+
+	if (!aconnector || !aconnector->mst_port)
+		return false;
+
+	mst_mgr = &aconnector->mst_port->mst_mgr;
+
+	if (!mst_mgr->mst_state)
+		return false;
+
+	ret = drm_dp_check_act_status(mst_mgr);
+
+	if (ret)
+		return false;
+
+	return true;
+}
+
+bool dm_helpers_dp_mst_send_payload_allocation(
+		struct dc_context *ctx,
+		const struct dc_stream *stream,
+		bool enable)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	struct drm_dp_mst_port *mst_port;
+	int ret;
+
+	aconnector = get_connector_for_sink(dev, stream->sink);
+
+	if (!aconnector || !aconnector->mst_port)
+		return false;
+
+	mst_port = aconnector->port;
+
+	mst_mgr = &aconnector->mst_port->mst_mgr;
+
+	if (!mst_mgr->mst_state)
+		return false;
+
+	ret = drm_dp_update_payload_part2(mst_mgr);
+
+	if (ret)
+		return false;
+
+	if (!enable)
+		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
+
+	return true;
+}
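+
+/*
+ * Note: the three helpers above implement the usual MST payload update
+ * sequence: write the payload table (update part 1), poll for the
+ * allocation change trigger (ACT), then send the final payload allocation
+ * (update part 2).
+ */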
+
+bool dm_helpers_dp_mst_start_top_mgr(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		bool boot)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	if (boot) {
+		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
+					aconnector, aconnector->base.base.id);
+		return true;
+	}
+
+	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+			aconnector, aconnector->base.base.id);
+
+	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+}
+
+void dm_helpers_dp_mst_stop_top_mgr(
+		struct dc_context *ctx,
+		const struct dc_link *link)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return;
+	}
+
+	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+			aconnector, aconnector->base.base.id);
+
+	if (aconnector->mst_mgr.mst_state)
+		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+}
+
+bool dm_helpers_dp_read_dpcd(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
+			data, size) > 0;
+}
+
+bool dm_helpers_dp_write_dpcd(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		uint32_t address,
+		const uint8_t *data,
+		uint32_t size)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+			address, (uint8_t *)data, size) > 0;
+}
+
+bool dm_helpers_submit_i2c(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		struct i2c_command *cmd)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct drm_device *dev = adev->ddev;
+	struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
+	struct i2c_msg *msgs;
+	int i = 0;
+	int num = cmd->number_of_payloads;
+	bool result;
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
+
+	if (!msgs)
+		return false;
+
+	for (i = 0; i < num; i++) {
+		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
+		msgs[i].addr = cmd->payloads[i].address;
+		msgs[i].len = cmd->payloads[i].length;
+		msgs[i].buf = cmd->payloads[i].data;
+	}
+
+	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
+
+	kfree(msgs);
+
+	return result;
+}

+ 829 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c

@@ -0,0 +1,829 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+struct handler_common_data {
+	struct list_head list;
+	interrupt_handler handler;
+	void *handler_arg;
+
+	/* DM which this handler belongs to */
+	struct amdgpu_display_manager *dm;
+};
+
+struct amdgpu_dm_irq_handler_data {
+	struct handler_common_data hcd;
+	/* DAL irq source which registered for this interrupt. */
+	enum dc_irq_source irq_source;
+};
+
+struct amdgpu_dm_timer_handler_data {
+	struct handler_common_data hcd;
+	struct delayed_work d_work;
+};
+
+#define DM_IRQ_TABLE_LOCK(adev, flags) \
+	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
+
+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
+	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
+
+/******************************************************************************
+ * Private functions.
+ *****************************************************************************/
+
+static void init_handler_common_data(
+	struct handler_common_data *hcd,
+	void (*ih)(void *),
+	void *args,
+	struct amdgpu_display_manager *dm)
+{
+	hcd->handler = ih;
+	hcd->handler_arg = args;
+	hcd->dm = dm;
+}
+
+/**
+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ *
+ * @work: work struct
+ */
+static void dm_irq_work_func(struct work_struct *work)
+{
+	struct list_head *entry;
+	struct irq_list_head *irq_list_head =
+		container_of(work, struct irq_list_head, work);
+	struct list_head *handler_list = &irq_list_head->head;
+	struct amdgpu_dm_irq_handler_data *handler_data;
+
+	list_for_each(entry, handler_list) {
+		handler_data =
+			list_entry(
+				entry,
+				struct amdgpu_dm_irq_handler_data,
+				hcd.list);
+
+		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+				handler_data->irq_source);
+
+		handler_data->hcd.handler(handler_data->hcd.handler_arg);
+	}
+
+	/* Call a DAL subcomponent which registered for interrupt notification
+	 * at INTERRUPT_LOW_IRQ_CONTEXT.
+	 * (The most common use is HPD interrupt) */
+}
+
+/**
+ * Remove a handler and return a pointer to the handler list from which the
+ * handler was removed.
+ */
+static struct list_head *remove_irq_handler(
+	struct amdgpu_device *adev,
+	void *ih,
+	const struct dc_interrupt_params *int_params)
+{
+	struct list_head *hnd_list;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
+	unsigned long irq_table_flags;
+	bool handler_removed = false;
+	enum dc_irq_source irq_source;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	irq_source = int_params->irq_source;
+
+	switch (int_params->int_context) {
+	case INTERRUPT_HIGH_IRQ_CONTEXT:
+		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+		break;
+	case INTERRUPT_LOW_IRQ_CONTEXT:
+	default:
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		break;
+	}
+
+	list_for_each_safe(entry, tmp, hnd_list) {
+
+		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+				hcd.list);
+
+		if (ih == handler) {
+			/* Found our handler. Remove it from the list. */
+			list_del(&handler->hcd.list);
+			handler_removed = true;
+			break;
+		}
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	if (handler_removed == false) {
+		/* Not necessarily an error - caller may not
+		 * know the context. */
+		return NULL;
+	}
+
+	kfree(handler);
+
+	DRM_DEBUG_KMS(
+	"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
+		ih, int_params->irq_source, int_params->int_context);
+
+	return hnd_list;
+}
+
+/* If 'handler_in == NULL' then remove ALL handlers. */
+static void remove_timer_handler(
+	struct amdgpu_device *adev,
+	struct amdgpu_dm_timer_handler_data *handler_in)
+{
+	struct amdgpu_dm_timer_handler_data *handler_temp;
+	struct list_head *handler_list;
+	struct list_head *entry, *tmp;
+	unsigned long irq_table_flags;
+	bool handler_removed = false;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	handler_list = &adev->dm.timer_handler_list;
+
+	list_for_each_safe(entry, tmp, handler_list) {
+		/* Note that list_for_each_safe() guarantees that
+		 * handler_temp is NOT null. */
+		handler_temp = list_entry(entry,
+				struct amdgpu_dm_timer_handler_data, hcd.list);
+
+		if (handler_in == NULL || handler_in == handler_temp) {
+			list_del(&handler_temp->hcd.list);
+			DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+			DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
+					handler_temp);
+
+			if (handler_in == NULL) {
+				/* Since it is still in the queue, it must
+				 * be cancelled. */
+				cancel_delayed_work_sync(&handler_temp->d_work);
+			}
+
+			kfree(handler_temp);
+			handler_removed = true;
+
+			DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+		}
+
+		if (handler_in == NULL) {
+			/* Remove ALL handlers. */
+			continue;
+		}
+
+		if (handler_in == handler_temp) {
+			/* Remove a SPECIFIC handler.
+			 * Found our handler - we can stop here. */
+			break;
+		}
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	if (handler_in != NULL && handler_removed == false) {
+		DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
+				handler_in);
+	}
+}
+
+/**
+ * dm_timer_work_func - Handle a timer.
+ *
+ * @work: work struct
+ */
+static void dm_timer_work_func(
+	struct work_struct *work)
+{
+	struct amdgpu_dm_timer_handler_data *handler_data =
+		container_of(work, struct amdgpu_dm_timer_handler_data,
+				d_work.work);
+
+	DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data);
+
+	/* Call a DAL subcomponent which registered for timer notification. */
+	handler_data->hcd.handler(handler_data->hcd.handler_arg);
+
+	/* We support only "single shot" timers. That means we must delete
+	 * the handler after it was called. */
+	remove_timer_handler(handler_data->hcd.dm->adev, handler_data);
+}
+
+static bool validate_irq_registration_params(
+	struct dc_interrupt_params *int_params,
+	void (*ih)(void *))
+{
+	if (NULL == int_params || NULL == ih) {
+		DRM_ERROR("DM_IRQ: invalid input!\n");
+		return false;
+	}
+
+	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+				int_params->int_context);
+		return false;
+	}
+
+	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+				int_params->irq_source);
+		return false;
+	}
+
+	return true;
+}
+
+static bool validate_irq_unregistration_params(
+	enum dc_irq_source irq_source,
+	irq_handler_idx handler_idx)
+{
+	if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+		DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+		return false;
+	}
+
+	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+		return false;
+	}
+
+	return true;
+}
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+void *amdgpu_dm_irq_register_interrupt(
+	struct amdgpu_device *adev,
+	struct dc_interrupt_params *int_params,
+	void (*ih)(void *),
+	void *handler_args)
+{
+	struct list_head *hnd_list;
+	struct amdgpu_dm_irq_handler_data *handler_data;
+	unsigned long irq_table_flags;
+	enum dc_irq_source irq_source;
+
+	if (false == validate_irq_registration_params(int_params, ih))
+		return DAL_INVALID_IRQ_HANDLER_IDX;
+
+	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+	if (!handler_data) {
+		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+		return DAL_INVALID_IRQ_HANDLER_IDX;
+	}
+
+	init_handler_common_data(&handler_data->hcd, ih, handler_args,
+			&adev->dm);
+
+	irq_source = int_params->irq_source;
+
+	handler_data->irq_source = irq_source;
+
+	/* Lock the list, add the handler. */
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	switch (int_params->int_context) {
+	case INTERRUPT_HIGH_IRQ_CONTEXT:
+		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+		break;
+	case INTERRUPT_LOW_IRQ_CONTEXT:
+	default:
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		break;
+	}
+
+	list_add_tail(&handler_data->hcd.list, hnd_list);
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	/* This pointer will be stored by code which requested interrupt
+	 * registration.
+	 * The same pointer will be needed in order to unregister the
+	 * interrupt. */
+
+	DRM_DEBUG_KMS(
+		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
+		handler_data,
+		irq_source,
+		int_params->int_context);
+
+	return handler_data;
+}
+
+void amdgpu_dm_irq_unregister_interrupt(
+	struct amdgpu_device *adev,
+	enum dc_irq_source irq_source,
+	void *ih)
+{
+	struct list_head *handler_list;
+	struct dc_interrupt_params int_params;
+	int i;
+
+	if (false == validate_irq_unregistration_params(irq_source, ih))
+		return;
+
+	memset(&int_params, 0, sizeof(int_params));
+
+	int_params.irq_source = irq_source;
+
+	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
+
+		int_params.int_context = i;
+
+		handler_list = remove_irq_handler(adev, ih, &int_params);
+
+		if (handler_list != NULL)
+			break;
+	}
+
+	if (handler_list == NULL) {
+		/* If we got here, it means we searched all irq contexts
+		 * for this irq source, but the handler was not found. */
+		DRM_ERROR(
+		"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
+			ih, irq_source);
+	}
+}
+
+int amdgpu_dm_irq_init(
+	struct amdgpu_device *adev)
+{
+	int src;
+	struct irq_list_head *lh;
+
+	DRM_DEBUG_KMS("DM_IRQ\n");
+
+	spin_lock_init(&adev->dm.irq_handler_list_table_lock);
+
+	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+		/* low context handler list init */
+		lh = &adev->dm.irq_handler_list_low_tab[src];
+		INIT_LIST_HEAD(&lh->head);
+		INIT_WORK(&lh->work, dm_irq_work_func);
+
+		/* high context handler init */
+		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+	}
+
+	INIT_LIST_HEAD(&adev->dm.timer_handler_list);
+
+	/* allocate and initialize the workqueue for DM timer */
+	adev->dm.timer_workqueue = create_singlethread_workqueue(
+			"dm_timer_queue");
+	if (adev->dm.timer_workqueue == NULL) {
+		DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+void amdgpu_dm_irq_register_timer(
+	struct amdgpu_device *adev,
+	struct dc_timer_interrupt_params *int_params,
+	interrupt_handler ih,
+	void *args)
+{
+	unsigned long jf_delay;
+	struct list_head *handler_list;
+	struct amdgpu_dm_timer_handler_data *handler_data;
+	unsigned long irq_table_flags;
+
+	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+	if (!handler_data) {
+		DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n");
+		return;
+	}
+
+	init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm);
+
+	INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func);
+
+	/* Lock the list, add the handler. */
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	handler_list = &adev->dm.timer_handler_list;
+
+	list_add_tail(&handler_data->hcd.list, handler_list);
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	jf_delay = usecs_to_jiffies(int_params->micro_sec_interval);
+
+	queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work,
+			jf_delay);
+
+	DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%u\n",
+			handler_data, int_params->micro_sec_interval);
+}
+
+/* DM IRQ and timer resource release */
+void amdgpu_dm_irq_fini(
+	struct amdgpu_device *adev)
+{
+	int src;
+	struct irq_list_head *lh;
+
+	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+
+	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+
+		/* The handler was removed from the table,
+		 * it means it is safe to flush all the 'work'
+		 * (because no code can schedule a new one). */
+		lh = &adev->dm.irq_handler_list_low_tab[src];
+		flush_work(&lh->work);
+	}
+
+	/* Cancel ALL timers and release handlers (if any). */
+	remove_timer_handler(adev, NULL);
+	/* Release the queue itself. */
+	destroy_workqueue(adev->dm.timer_workqueue);
+}
+
+int amdgpu_dm_irq_suspend(
+	struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h;
+	struct list_head *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: suspend\n");
+
+	/* disable HW interrupt */
+	for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, false);
+
+		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	return 0;
+}
+
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h, *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+
+	/* re-enable short pulse interrupts HW interrupt */
+	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	return 0;
+}
+
+int amdgpu_dm_irq_resume(struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h, *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+	/* re-enable HW interrupt */
+	for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	return 0;
+}
+
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ */
+static void amdgpu_dm_irq_schedule_work(
+	struct amdgpu_device *adev,
+	enum dc_irq_source irq_source)
+{
+	unsigned long irq_table_flags;
+	struct work_struct *work = NULL;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	if (work) {
+		if (!schedule_work(work))
+			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+						irq_source);
+	}
+
+}
+
+/** amdgpu_dm_irq_immediate_work
+ *  Call the handlers registered for the high IRQ context immediately,
+ *  without sending them to the work queue.
+ */
+static void amdgpu_dm_irq_immediate_work(
+	struct amdgpu_device *adev,
+	enum dc_irq_source irq_source)
+{
+	struct amdgpu_dm_irq_handler_data *handler_data;
+	struct list_head *entry;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	list_for_each(
+		entry,
+		&adev->dm.irq_handler_list_high_tab[irq_source]) {
+
+		handler_data =
+			list_entry(
+				entry,
+				struct amdgpu_dm_irq_handler_data,
+				hcd.list);
+
+		/* Call a subcomponent which registered for immediate
+		 * interrupt notification */
+		handler_data->hcd.handler(handler_data->hcd.handler_arg);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq
+ */
+int amdgpu_dm_irq_handler(
+		struct amdgpu_device *adev,
+		struct amdgpu_irq_src *source,
+		struct amdgpu_iv_entry *entry)
+{
+	enum dc_irq_source src =
+		dc_interrupt_to_irq_source(
+			adev->dm.dc,
+			entry->src_id,
+			entry->src_data[0]);
+
+	dc_interrupt_ack(adev->dm.dc, src);
+
+	/* Call high irq work immediately */
+	amdgpu_dm_irq_immediate_work(adev, src);
+	/*Schedule low_irq work */
+	amdgpu_dm_irq_schedule_work(adev, src);
+
+	return 0;
+}
+
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+{
+	switch (type) {
+	case AMDGPU_HPD_1:
+		return DC_IRQ_SOURCE_HPD1;
+	case AMDGPU_HPD_2:
+		return DC_IRQ_SOURCE_HPD2;
+	case AMDGPU_HPD_3:
+		return DC_IRQ_SOURCE_HPD3;
+	case AMDGPU_HPD_4:
+		return DC_IRQ_SOURCE_HPD4;
+	case AMDGPU_HPD_5:
+		return DC_IRQ_SOURCE_HPD5;
+	case AMDGPU_HPD_6:
+		return DC_IRQ_SOURCE_HPD6;
+	default:
+		return DC_IRQ_SOURCE_INVALID;
+	}
+}
+
+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
+	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+	dc_interrupt_set(adev->dm.dc, src, st);
+	return 0;
+}
+
+static inline int dm_irq_state(
+	struct amdgpu_device *adev,
+	struct amdgpu_irq_src *source,
+	unsigned crtc_id,
+	enum amdgpu_interrupt_state state,
+	const enum irq_type dal_irq_type,
+	const char *func)
+{
+	bool st;
+	enum dc_irq_source irq_source;
+
+	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
+
+	if (!acrtc) {
+		DRM_ERROR(
+			"%s: crtc is NULL at id: %d\n",
+			func,
+			crtc_id);
+		return 0;
+	}
+
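+	/* DAL irq sources of one type are laid out contiguously per display
+	 * controller, so e.g. IRQ_TYPE_PFLIP + otg_inst selects the per-CRTC
+	 * pageflip source; the pflip_params/vupdate_params indexing in
+	 * amdgpu_dm.c relies on the same layout. */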
+	irq_source = dal_irq_type + acrtc->otg_inst;
+
+	st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+	dc_interrupt_set(adev->dm.dc, irq_source, st);
+	return 0;
+}
+
+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned crtc_id,
+					enum amdgpu_interrupt_state state)
+{
+	return dm_irq_state(
+		adev,
+		source,
+		crtc_id,
+		state,
+		IRQ_TYPE_PFLIP,
+		__func__);
+}
+
+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned crtc_id,
+					enum amdgpu_interrupt_state state)
+{
+	return dm_irq_state(
+		adev,
+		source,
+		crtc_id,
+		state,
+		IRQ_TYPE_VUPDATE,
+		__func__);
+}
+
+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
+	.set = amdgpu_dm_set_crtc_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
+	.set = amdgpu_dm_set_pflip_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
+	.set = amdgpu_dm_set_hpd_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
+
+	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
+}
+
+/*
+ * amdgpu_dm_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector =
+				to_amdgpu_connector(connector);
+
+		const struct dc_link *dc_link = amdgpu_connector->dc_link;
+
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+			dc_interrupt_set(adev->dm.dc,
+					dc_link->irq_source_hpd,
+					true);
+		}
+
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+			dc_interrupt_set(adev->dm.dc,
+					dc_link->irq_source_hpd_rx,
+					true);
+		}
+	}
+}
+
+/**
+ * amdgpu_dm_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector =
+				to_amdgpu_connector(connector);
+		const struct dc_link *dc_link = amdgpu_connector->dc_link;
+
+		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
+
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+			dc_interrupt_set(adev->dm.dc,
+					dc_link->irq_source_hpd_rx,
+					false);
+		}
+	}
+}

+ 122 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h

@@ -0,0 +1,122 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ *	0 - success
+ *	non-zero - error
+ */
+int amdgpu_dm_irq_init(
+	struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(
+	struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq handler function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ * 	IRQ Handler Index on success.
+ * 	NULL on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(
+		struct amdgpu_device *adev,
+		struct dc_interrupt_params *int_params,
+		void (*ih)(void *),
+		void *handler_args);
+
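+/*
+ * Usage sketch, for illustration only: hpd_handler is a placeholder, and
+ * while int_context is shown elsewhere in this patch, the irq_source field
+ * and DC_IRQ_SOURCE_HPD1 are assumptions about dc_interrupt_params:
+ *
+ *	struct dc_interrupt_params params = {
+ *		.int_context = INTERRUPT_LOW_IRQ_CONTEXT,
+ *		.irq_source = DC_IRQ_SOURCE_HPD1,
+ *	};
+ *	void *handler_idx = amdgpu_dm_irq_register_interrupt(
+ *			adev, &params, hpd_handler, adev);
+ *	if (!handler_idx)
+ *		DRM_ERROR("failed to register hpd handler\n");
+ */
+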
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ *	by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @irq_source: irq source for which the handler was registered.
+ * @ih_index: irq handler index which was returned by
+ *	amdgpu_dm_irq_register_interrupt
+ */
+void amdgpu_dm_irq_unregister_interrupt(
+		struct amdgpu_device *adev,
+		enum dc_irq_source irq_source,
+		void *ih_index);
+
+void amdgpu_dm_irq_register_timer(
+	struct amdgpu_device *adev,
+	struct dc_timer_interrupt_params *int_params,
+	interrupt_handler ih,
+	void *args);
+
+/**
+ * amdgpu_dm_irq_handler
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq
+ */
+int amdgpu_dm_irq_handler(
+		struct amdgpu_device *adev,
+		struct amdgpu_irq_src *source,
+		struct amdgpu_iv_entry *entry);
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
+
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
+ *
+ */
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
+ * amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
+ *
+ */
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+int amdgpu_dm_irq_resume(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_DM_IRQ_H__ */

+ 443 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c

@@ -0,0 +1,443 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/version.h>
+#include <drm/drm_atomic_helper.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "dc.h"
+#include "dm_helpers.h"
+
+/* #define TRACE_DPCD */
+
+#ifdef TRACE_DPCD
+#define SIDE_BAND_MSG(address) ((address) >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && (address) < DP_SINK_COUNT_ESI)
+
+static inline char *side_band_msg_type_to_str(uint32_t address)
+{
+	static char str[10] = {0};
+
+	if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
+		strcpy(str, "DOWN_REQ");
+	else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
+		strcpy(str, "UP_REP");
+	else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
+		strcpy(str, "DOWN_REP");
+	else
+		strcpy(str, "UP_REQ");
+
+	return str;
+}
+
+void log_dpcd(uint8_t type,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size,
+		bool res)
+{
+	DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
+			(type == DP_AUX_NATIVE_READ) ||
+			(type == DP_AUX_I2C_READ) ?
+					"Read" : "Write",
+			address,
+			SIDE_BAND_MSG(address) ?
+					side_band_msg_type_to_str(address) : "Nop",
+			res ? "OK" : "Fail");
+
+	if (res) {
+		print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
+	}
+}
+#endif
+
+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct pci_dev *pdev = to_pci_dev(aux->dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = drm_dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	bool res;
+
+	switch (msg->request) {
+	case DP_AUX_NATIVE_READ:
+		res = dc_read_dpcd(
+			dc,
+			TO_DM_AUX(aux)->link_index,
+			msg->address,
+			msg->buffer,
+			msg->size);
+		break;
+	case DP_AUX_NATIVE_WRITE:
+		res = dc_write_dpcd(
+			dc,
+			TO_DM_AUX(aux)->link_index,
+			msg->address,
+			msg->buffer,
+			msg->size);
+		break;
+	default:
+		return 0;
+	}
+
+#ifdef TRACE_DPCD
+	log_dpcd(msg->request,
+			msg->address,
+			msg->buffer,
+			msg->size,
+			res);
+#endif
+
+	return msg->size;
+}
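+
+/*
+ * For illustration: a core helper call such as
+ *
+ *	drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, DP_DPCD_REV, buf, 1);
+ *
+ * arrives here with msg->request == DP_AUX_NATIVE_READ and is forwarded
+ * to dc_read_dpcd() for the link named by link_index; note that
+ * msg->size is returned regardless of res.
+ */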
+
+static enum drm_connector_status
+dm_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+	struct amdgpu_connector *master = aconnector->mst_port;
+
+	enum drm_connector_status status =
+		drm_dp_mst_detect_port(
+			connector,
+			&master->mst_mgr,
+			aconnector->port);
+
+	/*
+	 * We do not want to report this connector as connected until we
+	 * have an EDID for it.
+	 */
+	if (status == connector_status_connected &&
+		!aconnector->port->cached_edid)
+		status = connector_status_disconnected;
+
+	return status;
+}
+
+static void
+dm_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder;
+
+	drm_encoder_cleanup(&amdgpu_encoder->base);
+	kfree(amdgpu_encoder);
+	drm_connector_cleanup(connector);
+	kfree(amdgpu_connector);
+}
+
+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
+	.detect = dm_dp_mst_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = dm_dp_mst_connector_destroy,
+	.reset = amdgpu_dm_connector_funcs_reset,
+	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_set_property = amdgpu_dm_connector_atomic_set_property
+};
+
+static int dm_dp_mst_get_modes(struct drm_connector *connector)
+{
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+	int ret = 0;
+
+	ret = drm_add_edid_modes(&aconnector->base, aconnector->edid);
+
+	drm_edid_to_eld(&aconnector->base, aconnector->edid);
+
+	return ret;
+}
+
+static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	return &amdgpu_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
+	.get_modes = dm_dp_mst_get_modes,
+	.mode_valid = amdgpu_dm_connector_mode_valid,
+	.best_encoder = dm_mst_best_encoder,
+};
+
+static struct amdgpu_encoder *
+dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder;
+	struct drm_encoder *encoder;
+	const struct drm_connector_helper_funcs *connector_funcs =
+		connector->base.helper_private;
+	struct drm_encoder *enc_master =
+		connector_funcs->best_encoder(&connector->base);
+
+	DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
+	if (!amdgpu_encoder)
+		return NULL;
+
+	encoder = &amdgpu_encoder->base;
+	encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+	drm_encoder_init(
+		dev,
+		&amdgpu_encoder->base,
+		NULL,
+		DRM_MODE_ENCODER_DPMST,
+		NULL);
+
+	drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+
+	return amdgpu_encoder;
+}
+
+static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+							 struct drm_dp_mst_port *port,
+							 const char *pathprop)
+{
+	struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *aconnector;
+	struct drm_connector *connector;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_connector(connector);
+		if (aconnector->mst_port == master
+				&& !aconnector->port) {
+			DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
+						aconnector, connector->base.id, aconnector->mst_port);
+
+			aconnector->port = port;
+			drm_mode_connector_set_path_property(connector, pathprop);
+
+			drm_modeset_unlock(&dev->mode_config.connection_mutex);
+			return &aconnector->base;
+		}
+	}
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+	if (!aconnector)
+		return NULL;
+
+	connector = &aconnector->base;
+	aconnector->port = port;
+	aconnector->mst_port = master;
+
+	if (drm_connector_init(
+		dev,
+		connector,
+		&dm_dp_mst_connector_funcs,
+		DRM_MODE_CONNECTOR_DisplayPort)) {
+		kfree(aconnector);
+		return NULL;
+	}
+	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
+
+	amdgpu_dm_connector_init_helper(
+		&adev->dm,
+		aconnector,
+		DRM_MODE_CONNECTOR_DisplayPort,
+		master->dc_link,
+		master->connector_id);
+
+	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+
+	/*
+	 * TODO: understand why this one is needed
+	 */
+	drm_object_attach_property(
+		&connector->base,
+		dev->mode_config.path_property,
+		0);
+	drm_object_attach_property(
+		&connector->base,
+		dev->mode_config.tile_property,
+		0);
+
+	drm_mode_connector_set_path_property(connector, pathprop);
+
+	/*
+	 * Initialize connector state before adding the connector to drm and
+	 * framebuffer lists
+	 */
+	amdgpu_dm_connector_funcs_reset(connector);
+
+	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
+			aconnector, connector->base.id, aconnector->mst_port);
+
+	DRM_DEBUG_KMS(":%d\n", connector->base.id);
+
+	return connector;
+}
+
+static void dm_dp_destroy_mst_connector(
+	struct drm_dp_mst_topology_mgr *mgr,
+	struct drm_connector *connector)
+{
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
+				aconnector, connector->base.id, aconnector->mst_port);
+
+	aconnector->port = NULL;
+	if (aconnector->dc_sink) {
+		amdgpu_dm_remove_sink_from_freesync_module(connector);
+		dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+		dc_sink_release(aconnector->dc_sink);
+		aconnector->dc_sink = NULL;
+	}
+	if (aconnector->edid) {
+		kfree(aconnector->edid);
+		aconnector->edid = NULL;
+	}
+
+	drm_mode_connector_update_edid_property(
+			&aconnector->base,
+			NULL);
+}
+
+static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_connector *connector;
+	struct amdgpu_connector *aconnector;
+	struct edid *edid;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_connector(connector);
+		if (aconnector->port &&
+				aconnector->port->pdt != DP_PEER_DEVICE_NONE &&
+				aconnector->port->pdt != DP_PEER_DEVICE_MST_BRANCHING &&
+				!aconnector->dc_sink) {
+			/*
+			 * This is a plug-in case, where the port has been
+			 * created but the sink hasn't been created yet
+			 */
+			if (!aconnector->edid) {
+				struct dc_sink_init_data init_params = {
+						.link = aconnector->dc_link,
+						.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST};
+				edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+				if (!edid) {
+					drm_mode_connector_update_edid_property(
+						&aconnector->base,
+						NULL);
+					continue;
+				}
+
+				aconnector->edid = edid;
+
+				aconnector->dc_sink = dc_link_add_remote_sink(
+					aconnector->dc_link,
+					(uint8_t *)edid,
+					(edid->extensions + 1) * EDID_LENGTH,
+					&init_params);
+				if (aconnector->dc_sink)
+					amdgpu_dm_add_sink_to_freesync_module(
+							connector,
+							edid);
+
+				dm_restore_drm_connector_state(connector->dev, connector);
+			} else {
+				edid = aconnector->edid;
+			}
+
+			DRM_DEBUG_KMS("edid retrieved %p\n", edid);
+
+			drm_mode_connector_update_edid_property(
+				&aconnector->base,
+				aconnector->edid);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+
+	schedule_work(&adev->dm.mst_hotplug_work);
+}
+
+static void dm_dp_mst_register_connector(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	if (adev->mode_info.rfbdev) {
+		/* Do not add if it was already registered in the past */
+		for (i = 0; i < adev->mode_info.rfbdev->helper.connector_count; i++) {
+			if (adev->mode_info.rfbdev->helper.connector_info[i]->connector
+					== connector) {
+				drm_modeset_unlock_all(dev);
+				return;
+			}
+		}
+
+		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
+	} else {
+		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
+	}
+
+	drm_modeset_unlock_all(dev);
+
+	drm_connector_register(connector);
+}
+
+static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+	.add_connector = dm_dp_add_mst_connector,
+	.destroy_connector = dm_dp_destroy_mst_connector,
+	.hotplug = dm_dp_mst_hotplug,
+	.register_connector = dm_dp_mst_register_connector
+};
+
+void amdgpu_dm_initialize_mst_connector(
+	struct amdgpu_display_manager *dm,
+	struct amdgpu_connector *aconnector)
+{
+	aconnector->dm_dp_aux.aux.name = "dmdc";
+	aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
+	aconnector->dm_dp_aux.link_index = aconnector->connector_id;
+
+	drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+	aconnector->mst_mgr.cbs = &dm_mst_cbs;
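+	/* 16 here is the max DPCD transaction size and 4 the max payload
+	 * count, going by drm_dp_mst_topology_mgr_init()'s parameters at
+	 * this kernel version */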
+	drm_dp_mst_topology_mgr_init(
+		&aconnector->mst_mgr,
+		dm->adev->ddev,
+		&aconnector->dm_dp_aux.aux,
+		16,
+		4,
+		aconnector->connector_id);
+}
+

+ 36 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h

@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
+#define __DAL_AMDGPU_DM_MST_TYPES_H__
+
+struct amdgpu_display_manager;
+struct amdgpu_connector;
+
+void amdgpu_dm_initialize_mst_connector(
+	struct amdgpu_display_manager *dm,
+	struct amdgpu_connector *aconnector);
+
+#endif

+ 463 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c

@@ -0,0 +1,463 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_types.h"
+#include "amdgpu_pm.h"
+
+#define dm_alloc(size) kzalloc(size, GFP_KERNEL)
+#define dm_realloc(ptr, size) krealloc(ptr, size, GFP_KERNEL)
+#define dm_free(ptr) kfree(ptr)
+
+/******************************************************************************
+ * IRQ Interfaces.
+ *****************************************************************************/
+
+void dal_register_timer_interrupt(
+	struct dc_context *ctx,
+	struct dc_timer_interrupt_params *int_params,
+	interrupt_handler ih,
+	void *args)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+
+	if (!adev || !int_params) {
+		DRM_ERROR("DM_IRQ: invalid input!\n");
+		return;
+	}
+
+	if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) {
+		/* only low irq ctx is supported. */
+		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+				int_params->int_context);
+		return;
+	}
+
+	amdgpu_dm_irq_register_timer(adev, int_params, ih, args);
+}
+
+void dal_isr_acquire_lock(struct dc_context *ctx)
+{
+	/*TODO*/
+}
+
+void dal_isr_release_lock(struct dc_context *ctx)
+{
+	/*TODO*/
+}
+
+/******************************************************************************
+ * End-of-IRQ Interfaces.
+ *****************************************************************************/
+
+bool dm_get_platform_info(struct dc_context *ctx,
+			struct platform_info_params *params)
+{
+	/*TODO*/
+	return false;
+}
+
+bool dm_write_persistent_data(struct dc_context *ctx,
+		const struct dc_sink *sink,
+		const char *module_name,
+		const char *key_name,
+		void *params,
+		unsigned int size,
+		struct persistent_data_flag *flag)
+{
+	/*TODO implement*/
+	return false;
+}
+
+bool dm_read_persistent_data(struct dc_context *ctx,
+				const struct dc_sink *sink,
+				const char *module_name,
+				const char *key_name,
+				void *params,
+				unsigned int size,
+				struct persistent_data_flag *flag)
+{
+	/*TODO implement*/
+	return false;
+}
+
+void dm_delay_in_microseconds(struct dc_context *ctx,
+					unsigned int microSeconds)
+{
+	/* TODO: implement */
+}
+
+/**** power component interfaces ****/
+
+bool dm_pp_pre_dce_clock_change(
+		struct dc_context *ctx,
+		struct dm_pp_gpu_clock_range *requested_state,
+		struct dm_pp_gpu_clock_range *actual_state)
+{
+	/*TODO*/
+	return false;
+}
+
+bool dm_pp_apply_safe_state(
+		const struct dc_context *ctx)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+
+	if (adev->pm.dpm_enabled) {
+		/* TODO: Does this require PreModeChange event to PPLIB? */
+	}
+
+	return true;
+}
+
+bool dm_pp_apply_display_requirements(
+		const struct dc_context *ctx,
+		const struct dm_pp_display_configuration *pp_display_cfg)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+
+	if (adev->pm.dpm_enabled) {
+
+		memset(&adev->pm.pm_display_cfg, 0,
+				sizeof(adev->pm.pm_display_cfg));
+
+		adev->pm.pm_display_cfg.cpu_cc6_disable =
+			pp_display_cfg->cpu_cc6_disable;
+
+		adev->pm.pm_display_cfg.cpu_pstate_disable =
+			pp_display_cfg->cpu_pstate_disable;
+
+		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+			pp_display_cfg->cpu_pstate_separation_time;
+
+		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+			pp_display_cfg->nb_pstate_switch_disable;
+
+		adev->pm.pm_display_cfg.num_display =
+				pp_display_cfg->display_count;
+		adev->pm.pm_display_cfg.num_path_including_non_display =
+				pp_display_cfg->display_count;
+
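+		/* PPLib clock fields are in units of 10 kHz, hence the /10
+		 * conversions below */
+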
+		adev->pm.pm_display_cfg.min_core_set_clock =
+				pp_display_cfg->min_engine_clock_khz/10;
+		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+		adev->pm.pm_display_cfg.min_mem_set_clock =
+				pp_display_cfg->min_memory_clock_khz/10;
+
+		adev->pm.pm_display_cfg.multi_monitor_in_sync =
+				pp_display_cfg->all_displays_in_sync;
+		adev->pm.pm_display_cfg.min_vblank_time =
+				pp_display_cfg->avail_mclk_switch_time_us;
+
+		adev->pm.pm_display_cfg.display_clk =
+				pp_display_cfg->disp_clk_khz/10;
+
+		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+		adev->pm.pm_display_cfg.line_time_in_us =
+				pp_display_cfg->line_time_in_us;
+
+		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+		adev->pm.pm_display_cfg.crossfire_display_index = -1;
+		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+		/* TODO: complete implementation of
+		 * amd_powerplay_display_configuration_change().
+		 * Follow example of:
+		 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+		 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+		amd_powerplay_display_configuration_change(
+				adev->powerplay.pp_handle,
+				&adev->pm.pm_display_cfg);
+
+		/* TODO: replace by a separate call to 'apply display cfg'? */
+		amdgpu_pm_compute_clocks(adev);
+	}
+
+	return true;
+}
+
+bool dc_service_get_system_clocks_range(
+		const struct dc_context *ctx,
+		struct dm_pp_gpu_clock_range *sys_clks)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+
+	/* Default values, in case PPLib is not compiled-in. */
+	sys_clks->mclk.max_khz = 800000;
+	sys_clks->mclk.min_khz = 800000;
+
+	sys_clks->sclk.max_khz = 600000;
+	sys_clks->sclk.min_khz = 300000;
+
+	if (adev->pm.dpm_enabled) {
+		sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
+		sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
+
+		sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
+		sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
+	}
+
+	return true;
+}
+
+static void get_default_clock_levels(
+		enum dm_pp_clock_type clk_type,
+		struct dm_pp_clock_levels *clks)
+{
+	uint32_t disp_clks_in_khz[6] = {
+			300000, 400000, 496560, 626090, 685720, 757900 };
+	uint32_t sclks_in_khz[6] = {
+			300000, 360000, 423530, 514290, 626090, 720000 };
+	uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+	switch (clk_type) {
+	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+		clks->num_levels = 6;
+		memmove(clks->clocks_in_khz, disp_clks_in_khz,
+				sizeof(disp_clks_in_khz));
+		break;
+	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+		clks->num_levels = 6;
+		memmove(clks->clocks_in_khz, sclks_in_khz,
+				sizeof(sclks_in_khz));
+		break;
+	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+		clks->num_levels = 2;
+		memmove(clks->clocks_in_khz, mclks_in_khz,
+				sizeof(mclks_in_khz));
+		break;
+	default:
+		clks->num_levels = 0;
+		break;
+	}
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+		enum dm_pp_clock_type dm_pp_clk_type)
+{
+	enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+	switch (dm_pp_clk_type) {
+	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+		amd_pp_clk_type = amd_pp_disp_clock;
+		break;
+	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+		amd_pp_clk_type = amd_pp_sys_clock;
+		break;
+	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+		amd_pp_clk_type = amd_pp_mem_clock;
+		break;
+	default:
+		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+				dm_pp_clk_type);
+		break;
+	}
+
+	return amd_pp_clk_type;
+}
+
+static void pp_to_dc_clock_levels(
+		const struct amd_pp_clocks *pp_clks,
+		struct dm_pp_clock_levels *dc_clks,
+		enum dm_pp_clock_type dc_clk_type)
+{
+	uint32_t i;
+
+	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+				pp_clks->count,
+				DM_PP_MAX_CLOCK_LEVELS);
+
+		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+	} else {
+		dc_clks->num_levels = pp_clks->count;
+	}
+
+	DRM_INFO("DM_PPLIB: values for %s clock\n",
+			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+	for (i = 0; i < dc_clks->num_levels; i++) {
+		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+		/* translate from units of 10 kHz to kHz */
+		dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
+	}
+}
+
+bool dm_pp_get_clock_levels_by_type(
+		const struct dc_context *ctx,
+		enum dm_pp_clock_type clk_type,
+		struct dm_pp_clock_levels *dc_clks)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct amd_pp_clocks pp_clks = { 0 };
+	struct amd_pp_simple_clock_info validation_clks = { 0 };
+	uint32_t i;
+
+	if (amd_powerplay_get_clock_by_type(pp_handle,
+			dc_to_pp_clock_type(clk_type), &pp_clks)) {
+		/* Error in pplib. Provide default values. */
+		get_default_clock_levels(clk_type, dc_clks);
+		return true;
+	}
+
+	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+	if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
+			&validation_clks)) {
+		/* Error in pplib. Provide default values. */
+		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+		validation_clks.engine_max_clock = 72000;
+		validation_clks.memory_max_clock = 80000;
+		validation_clks.level = 0;
+	}
+
+	DRM_INFO("DM_PPLIB: Validation clocks:\n");
+	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
+			validation_clks.engine_max_clock);
+	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
+			validation_clks.memory_max_clock);
+	DRM_INFO("DM_PPLIB:    level           : %d\n",
+			validation_clks.level);
+
+	/* Translate from units of 10 kHz to kHz. */
+	validation_clks.engine_max_clock *= 10;
+	validation_clks.memory_max_clock *= 10;
+
+	/* Determine the highest non-boosted level from the Validation Clocks */
+	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+		for (i = 0; i < dc_clks->num_levels; i++) {
+			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+				/* This clock is higher than the validation
+				 * clock. That means the previous one is the
+				 * highest non-boosted one. */
+				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+						dc_clks->num_levels, i + 1);
+				dc_clks->num_levels = i;
+				break;
+			}
+		}
+	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+		for (i = 0; i < dc_clks->num_levels; i++) {
+			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+						dc_clks->num_levels, i + 1);
+				dc_clks->num_levels = i;
+				break;
+			}
+		}
+	}
+
+	return true;
+}
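+
+/*
+ * Worked example of the trimming above, with hypothetical numbers: given
+ * engine levels {300000, 600000, 720000} kHz and a validation
+ * engine_max_clock of 60000 (600000 kHz after the *10 translation),
+ * 720000 is the first level above the validation clock, so num_levels is
+ * cut from 3 to 2 and only the non-boosted levels remain.
+ */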
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+	const struct dc_context *ctx,
+	enum dm_pp_clock_type clk_type,
+	struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+	const struct dc_context *ctx,
+	enum dm_pp_clock_type clk_type,
+	struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+	const struct dc_context *ctx,
+	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+	const struct dc_context *ctx,
+	struct dm_pp_power_level_change_request *level_change_req)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+	const struct dc_context *ctx,
+	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+bool dm_pp_get_static_clocks(
+	const struct dc_context *ctx,
+	struct dm_pp_static_clock_info *static_clk_info)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/**** end of power component interfaces ****/
+
+/* Calls to notification */
+
+void dal_notify_setmode_complete(struct dc_context *ctx,
+	uint32_t h_total,
+	uint32_t v_total,
+	uint32_t h_active,
+	uint32_t v_active,
+	uint32_t pix_clk_in_khz)
+{
+	/*TODO*/
+}
+/* End of calls to notification */
+
+long dm_get_pid(void)
+{
+	return current->pid;
+}
+
+long dm_get_tgid(void)
+{
+	return current->tgid;
+}

+ 3150 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c

@@ -0,0 +1,3150 @@
+/*
+ * Copyright 2012-13 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_edid.h>
+
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "dm_services_types.h"
+
+/*
+ * We need to #undef FRAME_SIZE and DEPRECATED because they conflict
+ * with ptrace-abi.h's #define's of them.
+ */
+#undef FRAME_SIZE
+#undef DEPRECATED
+
+#include "dc.h"
+
+#include "amdgpu_dm_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "modules/inc/mod_freesync.h"
+
+struct dm_connector_state {
+	struct drm_connector_state base;
+
+	enum amdgpu_rmx_type scaling;
+	uint8_t underscan_vborder;
+	uint8_t underscan_hborder;
+	bool underscan_enable;
+};
+
+#define to_dm_connector_state(x)\
+	container_of((x), struct dm_connector_state, base)
+
+
+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+	.destroy = amdgpu_dm_encoder_destroy,
+};
+
+static void dm_set_cursor(
+	struct amdgpu_crtc *amdgpu_crtc,
+	uint64_t gpu_addr,
+	uint32_t width,
+	uint32_t height)
+{
+	struct dc_cursor_attributes attributes;
+
+	amdgpu_crtc->cursor_width = width;
+	amdgpu_crtc->cursor_height = height;
+
+	attributes.address.high_part = upper_32_bits(gpu_addr);
+	attributes.address.low_part  = lower_32_bits(gpu_addr);
+	attributes.width             = width;
+	attributes.height            = height;
+	attributes.x_hot             = 0;
+	attributes.y_hot             = 0;
+	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+	attributes.rotation_angle    = 0;
+	attributes.attribute_flags.value = 0;
+
+	if (!dc_target_set_cursor_attributes(
+				amdgpu_crtc->target,
+				&attributes)) {
+		DRM_ERROR("DC failed to set cursor attributes\n");
+	}
+}
+
+static int dm_crtc_unpin_cursor_bo_old(
+	struct amdgpu_crtc *amdgpu_crtc)
+{
+	struct amdgpu_bo *robj;
+	int ret = 0;
+
+	if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
+		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
+		ret = amdgpu_bo_reserve(robj, false);
+
+		if (likely(ret == 0)) {
+			ret = amdgpu_bo_unpin(robj);
+
+			if (unlikely(ret != 0)) {
+				DRM_ERROR(
+					"%s: unpin failed (ret=%d), bo %p\n",
+					__func__,
+					ret,
+					amdgpu_crtc->cursor_bo);
+			}
+
+			amdgpu_bo_unreserve(robj);
+		} else {
+			DRM_ERROR(
+				"%s: reserve failed (ret=%d), bo %p\n",
+				__func__,
+				ret,
+				amdgpu_crtc->cursor_bo);
+		}
+
+		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+		amdgpu_crtc->cursor_bo = NULL;
+	}
+
+	return ret;
+}
+
+static int dm_crtc_pin_cursor_bo_new(
+	struct drm_crtc *crtc,
+	struct drm_file *file_priv,
+	uint32_t handle,
+	struct amdgpu_bo **ret_obj)
+{
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct amdgpu_bo *robj;
+	struct drm_gem_object *obj;
+	int ret = -EINVAL;
+
+	if (NULL != crtc) {
+		struct drm_device *dev = crtc->dev;
+		struct amdgpu_device *adev = dev->dev_private;
+		uint64_t gpu_addr;
+
+		amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+		obj = drm_gem_object_lookup(file_priv, handle);
+
+		if (!obj) {
+			DRM_ERROR(
+				"Cannot find cursor object %x for crtc %d\n",
+				handle,
+				amdgpu_crtc->crtc_id);
+			goto release;
+		}
+		robj = gem_to_amdgpu_bo(obj);
+
+		ret  = amdgpu_bo_reserve(robj, false);
+
+		if (unlikely(ret != 0)) {
+			drm_gem_object_unreference_unlocked(obj);
+			DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
+					ret, handle);
+			goto release;
+		}
+
+		ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, 0,
+						adev->mc.visible_vram_size,
+						&gpu_addr);
+
+		if (ret == 0) {
+			amdgpu_crtc->cursor_addr = gpu_addr;
+			*ret_obj  = robj;
+		}
+		amdgpu_bo_unreserve(robj);
+		if (ret)
+			drm_gem_object_unreference_unlocked(obj);
+
+	}
+release:
+
+	return ret;
+}
+
+static int dm_crtc_cursor_set(
+	struct drm_crtc *crtc,
+	struct drm_file *file_priv,
+	uint32_t handle,
+	uint32_t width,
+	uint32_t height)
+{
+	struct amdgpu_bo *new_cursor_bo;
+	struct dc_cursor_position position;
+
+	int ret;
+
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	ret = -EINVAL;
+	new_cursor_bo = NULL;
+
+	DRM_DEBUG_KMS(
+	"%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
+		__func__,
+		amdgpu_crtc->crtc_id,
+		handle,
+		width,
+		height,
+		amdgpu_crtc->cursor_bo);
+
+	if (!handle) {
+		/* turn off cursor */
+		position.enable = false;
+		position.x = 0;
+		position.y = 0;
+		position.hot_spot_enable = false;
+
+		if (amdgpu_crtc->target) {
+			/*set cursor visible false*/
+			dc_target_set_cursor_position(
+				amdgpu_crtc->target,
+				&position);
+		}
+		/*unpin old cursor buffer and update cache*/
+		ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
+		goto release;
+
+	}
+
+	if ((width > amdgpu_crtc->max_cursor_width) ||
+		(height > amdgpu_crtc->max_cursor_height)) {
+		DRM_ERROR(
+			"%s: bad cursor width or height %d x %d\n",
+			__func__,
+			width,
+			height);
+		goto release;
+	}
+	/*try to pin new cursor bo*/
+	ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle, &new_cursor_bo);
+	/*if map not successful then return an error*/
+	if (ret)
+		goto release;
+
+	/*program new cursor bo to hardware*/
+	dm_set_cursor(amdgpu_crtc, amdgpu_crtc->cursor_addr, width, height);
+
+	/* unmap the old cursor bo that is no longer used,
+	 * returning its memory and mapping */
+	dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
+
+	/*assign new cursor bo to our internal cache*/
+	amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;
+
+release:
+	return ret;
+}
+
+static int dm_crtc_cursor_move(struct drm_crtc *crtc,
+				     int x, int y)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int xorigin = 0, yorigin = 0;
+	struct dc_cursor_position position;
+
+	/* avivo cursors are offset into the total surface */
+	x += crtc->primary->state->src_x >> 16;
+	y += crtc->primary->state->src_y >> 16;
+
+	/*
+	 * TODO: for cursor debugging unguard the following
+	 */
+#if 0
+	DRM_DEBUG_KMS(
+		"%s: x %d y %d c->x %d c->y %d\n",
+		__func__,
+		x,
+		y,
+		crtc->x,
+		crtc->y);
+#endif
+
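+	/* Clamp negative coordinates into the hotspot: e.g. x == -10 yields
+	 * xorigin = 10 and x = 0, so the cursor image shifts instead of
+	 * being drawn off-screen. */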
+	if (x < 0) {
+		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+		y = 0;
+	}
+
+	position.enable = true;
+	position.x = x;
+	position.y = y;
+
+	position.hot_spot_enable = true;
+	position.x_hotspot = xorigin;
+	position.y_hotspot = yorigin;
+
+	if (amdgpu_crtc->target) {
+		if (!dc_target_set_cursor_position(
+					amdgpu_crtc->target,
+					&position)) {
+			DRM_ERROR("DC failed to set cursor position\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	DRM_DEBUG_KMS(
+		"%s: with cursor_bo %p\n",
+		__func__,
+		amdgpu_crtc->cursor_bo);
+
+	if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) {
+		dm_set_cursor(
+		amdgpu_crtc,
+		amdgpu_crtc->cursor_addr,
+		amdgpu_crtc->cursor_width,
+		amdgpu_crtc->cursor_height);
+	}
+}
+
+static bool fill_rects_from_plane_state(
+	const struct drm_plane_state *state,
+	struct dc_surface *surface)
+{
+	surface->src_rect.x = state->src_x >> 16;
+	surface->src_rect.y = state->src_y >> 16;
+	/* src coords are 16.16 fixed point (e.g. 20 << 16 yields 20); for now
+	 * we drop the fractional part rather than deal with sub-pixel positions */
+	surface->src_rect.width = state->src_w >> 16;
+
+	if (surface->src_rect.width == 0)
+		return false;
+
+	surface->src_rect.height = state->src_h >> 16;
+	if (surface->src_rect.height == 0)
+		return false;
+
+	surface->dst_rect.x = state->crtc_x;
+	surface->dst_rect.y = state->crtc_y;
+
+	if (state->crtc_w == 0)
+		return false;
+
+	surface->dst_rect.width = state->crtc_w;
+
+	if (state->crtc_h == 0)
+		return false;
+
+	surface->dst_rect.height = state->crtc_h;
+
+	surface->clip_rect = surface->dst_rect;
+
+	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
+	case DRM_MODE_ROTATE_0:
+		surface->rotation = ROTATION_ANGLE_0;
+		break;
+	case DRM_MODE_ROTATE_90:
+		surface->rotation = ROTATION_ANGLE_90;
+		break;
+	case DRM_MODE_ROTATE_180:
+		surface->rotation = ROTATION_ANGLE_180;
+		break;
+	case DRM_MODE_ROTATE_270:
+		surface->rotation = ROTATION_ANGLE_270;
+		break;
+	default:
+		surface->rotation = ROTATION_ANGLE_0;
+		break;
+	}
+
+	return true;
+}
+
+static bool get_fb_info(
+	const struct amdgpu_framebuffer *amdgpu_fb,
+	uint64_t *tiling_flags,
+	uint64_t *fb_location)
+{
+	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+	int r = amdgpu_bo_reserve(rbo, false);
+
+	if (unlikely(r != 0)) {
+		DRM_ERROR("Unable to reserve buffer\n");
+		return false;
+	}
+
+	if (fb_location)
+		*fb_location = amdgpu_bo_gpu_offset(rbo);
+
+	if (tiling_flags)
+		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+	amdgpu_bo_unreserve(rbo);
+
+	return true;
+}
+
+static void fill_plane_attributes_from_fb(
+	struct dc_surface *surface,
+	const struct amdgpu_framebuffer *amdgpu_fb, bool addReq)
+{
+	uint64_t tiling_flags;
+	uint64_t fb_location = 0;
+	const struct drm_framebuffer *fb = &amdgpu_fb->base;
+	struct drm_format_name_buf format_name;
+
+	get_fb_info(
+		amdgpu_fb,
+		&tiling_flags,
+		addReq ? &fb_location : NULL);
+
+	surface->address.type                = PLN_ADDR_TYPE_GRAPHICS;
+	surface->address.grph.addr.low_part  = lower_32_bits(fb_location);
+	surface->address.grph.addr.high_part = upper_32_bits(fb_location);
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8:
+		surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+		break;
+	case DRM_FORMAT_RGB565:
+		surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen format %s\n",
+		          drm_get_format_name(fb->format->format, &format_name));
+		return;
+	}
+
+	memset(&surface->tiling_info, 0, sizeof(surface->tiling_info));
+
+	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+		/* XXX fix me for VI */
+		surface->tiling_info.gfx8.num_banks = num_banks;
+		surface->tiling_info.gfx8.array_mode =
+				DC_ARRAY_2D_TILED_THIN1;
+		surface->tiling_info.gfx8.tile_split = tile_split;
+		surface->tiling_info.gfx8.bank_width = bankw;
+		surface->tiling_info.gfx8.bank_height = bankh;
+		surface->tiling_info.gfx8.tile_aspect = mtaspect;
+		surface->tiling_info.gfx8.tile_mode =
+				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+			== DC_ARRAY_1D_TILED_THIN1) {
+		surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+	}
+
+	surface->tiling_info.gfx8.pipe_config =
+			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+	surface->plane_size.grph.surface_size.x = 0;
+	surface->plane_size.grph.surface_size.y = 0;
+	surface->plane_size.grph.surface_size.width = fb->width;
+	surface->plane_size.grph.surface_size.height = fb->height;
+	surface->plane_size.grph.surface_pitch =
+		fb->pitches[0] / fb->format->cpp[0];
+
+	surface->visible = true;
+	surface->scaling_quality.h_taps_c = 0;
+	surface->scaling_quality.v_taps_c = 0;
+
+	/* TODO: unhardcode */
+	surface->color_space = COLOR_SPACE_SRGB;
+	/* is this needed? is surface zeroed at allocation? */
+	surface->scaling_quality.h_taps = 0;
+	surface->scaling_quality.v_taps = 0;
+	surface->stereo_format = PLANE_STEREO_FORMAT_NONE;
+}
+
+#define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256
+
+static void fill_gamma_from_crtc(
+	const struct drm_crtc *crtc,
+	struct dc_surface *dc_surface)
+{
+	int i;
+	struct dc_gamma *gamma;
+	struct drm_crtc_state *state = crtc->state;
+	struct drm_color_lut *lut = (struct drm_color_lut *) state->gamma_lut->data;
+
+	gamma = dc_create_gamma();
+
+	if (gamma == NULL)
+		return;
+
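+	/* the legacy gamma_lut handed down by DRM is assumed to hold
+	 * NUM_OF_RAW_GAMMA_RAMP_RGB_256 (256) entries, matching the
+	 * legacy gamma size */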
+	for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
+		gamma->gamma_ramp_rgb256x3x16.red[i] = lut[i].red;
+		gamma->gamma_ramp_rgb256x3x16.green[i] = lut[i].green;
+		gamma->gamma_ramp_rgb256x3x16.blue[i] = lut[i].blue;
+	}
+
+	gamma->type = GAMMA_RAMP_RBG256X3X16;
+	gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16);
+
+	dc_surface->gamma_correction = gamma;
+}
+
+static void fill_plane_attributes(
+			struct dc_surface *surface,
+			struct drm_plane_state *state, bool addrReq)
+{
+	const struct amdgpu_framebuffer *amdgpu_fb =
+		to_amdgpu_framebuffer(state->fb);
+	const struct drm_crtc *crtc = state->crtc;
+
+	fill_rects_from_plane_state(state, surface);
+	fill_plane_attributes_from_fb(
+		surface,
+		amdgpu_fb,
+		addrReq);
+
+	/* In case of gamma set, update gamma value */
+	if (state->crtc->state->gamma_lut)
+		fill_gamma_from_crtc(crtc, surface);
+}
+
+/*****************************************************************************/
+
+struct amdgpu_connector *aconnector_from_drm_crtc_id(
+		const struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_connector *aconnector;
+
+	list_for_each_entry(connector,
+			&dev->mode_config.connector_list, head)	{
+
+		aconnector = to_amdgpu_connector(connector);
+
+		if (aconnector->base.state->crtc != &acrtc->base)
+			continue;
+
+		/* Found the connector */
+		return aconnector;
+	}
+
+	/* If we get here, not found. */
+	return NULL;
+}
+
+static void update_stream_scaling_settings(
+		const struct drm_display_mode *mode,
+		const struct dm_connector_state *dm_state,
+		const struct dc_stream *stream)
+{
+	struct amdgpu_device *adev = dm_state->base.crtc->dev->dev_private;
+	enum amdgpu_rmx_type rmx_type;
+
+	struct rect src = { 0 }; /* viewport in target space*/
+	struct rect dst = { 0 }; /* stream addressable area */
+
+	/* Full screen scaling by default */
+	src.width = mode->hdisplay;
+	src.height = mode->vdisplay;
+	dst.width = stream->timing.h_addressable;
+	dst.height = stream->timing.v_addressable;
+
+	rmx_type = dm_state->scaling;
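+	/* aspect-preserving scaling by cross-multiplication: e.g. a
+	 * 1280x1024 source into a 1920x1080 stream gives
+	 * 1280*1080 < 1024*1920, so width is the limiting side and
+	 * dst becomes 1280*1080/1024 = 1350 wide (pillarboxed 1350x1080) */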
+	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+		if (src.width * dst.height <
+				src.height * dst.width) {
+			/* height needs less upscaling/more downscaling */
+			dst.width = src.width *
+					dst.height / src.height;
+		} else {
+			/* width needs less upscaling/more downscaling */
+			dst.height = src.height *
+					dst.width / src.width;
+		}
+	} else if (rmx_type == RMX_CENTER) {
+		dst = src;
+	}
+
+	dst.x = (stream->timing.h_addressable - dst.width) / 2;
+	dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+	if (dm_state->underscan_enable) {
+		dst.x += dm_state->underscan_hborder / 2;
+		dst.y += dm_state->underscan_vborder / 2;
+		dst.width -= dm_state->underscan_hborder;
+		dst.height -= dm_state->underscan_vborder;
+	}
+
+	adev->dm.dc->stream_funcs.stream_update_scaling(adev->dm.dc, stream, &src, &dst);
+
+	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
+			dst.x, dst.y, dst.width, dst.height);
+}
+
+static void dm_dc_surface_commit(
+		struct dc *dc,
+		struct drm_crtc *crtc)
+{
+	struct dc_surface *dc_surface;
+	const struct dc_surface *dc_surfaces[1];
+	const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct dc_target *dc_target = acrtc->target;
+
+	if (!dc_target) {
+		dm_error(
+			"%s: Failed to obtain target on crtc (%d)!\n",
+			__func__,
+			acrtc->crtc_id);
+		goto fail;
+	}
+
+	dc_surface = dc_create_surface(dc);
+
+	if (!dc_surface) {
+		dm_error(
+			"%s: Failed to create a surface!\n",
+			__func__);
+		goto fail;
+	}
+
+	/* Surface programming */
+	fill_plane_attributes(dc_surface, crtc->primary->state, true);
+
+	dc_surfaces[0] = dc_surface;
+
+	if (false == dc_commit_surfaces_to_target(
+			dc,
+			dc_surfaces,
+			1,
+			dc_target)) {
+		dm_error(
+			"%s: Failed to attach surface!\n",
+			__func__);
+	}
+
+	dc_surface_release(dc_surface);
+fail:
+	return;
+}
+
+static enum dc_color_depth convert_color_depth_from_display_info(
+		const struct drm_connector *connector)
+{
+	uint32_t bpc = connector->display_info.bpc;
+
+	/* Limit color depth to 8 bit.
+	 * TODO: still need to handle deep color */
+	if (bpc > 8)
+		bpc = 8;
+
+	switch (bpc) {
+	case 0:
+		/* Temporary workaround: DRM doesn't parse color depth for
+		 * EDID revisions before 1.4.
+		 * TODO: fix EDID parsing
+		 */
+		return COLOR_DEPTH_888;
+	case 6:
+		return COLOR_DEPTH_666;
+	case 8:
+		return COLOR_DEPTH_888;
+	case 10:
+		return COLOR_DEPTH_101010;
+	case 12:
+		return COLOR_DEPTH_121212;
+	case 14:
+		return COLOR_DEPTH_141414;
+	case 16:
+		return COLOR_DEPTH_161616;
+	default:
+		return COLOR_DEPTH_UNDEFINED;
+	}
+}
+
+static enum dc_aspect_ratio get_aspect_ratio(
+		const struct drm_display_mode *mode_in)
+{
+	int32_t width = mode_in->crtc_hdisplay * 9;
+	int32_t height = mode_in->crtc_vdisplay * 16;
+
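+	/* cross-multiply to compare w/h against 16/9 without floating point:
+	 * w*9 == h*16 exactly for a 16:9 mode (1920*9 == 1080*16 == 17280);
+	 * the +/-10 tolerance absorbs rounding in the mode */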
+	if ((width - height) < 10 && (width - height) > -10)
+		return ASPECT_RATIO_16_9;
+	else
+		return ASPECT_RATIO_4_3;
+}
+
+static enum dc_color_space get_output_color_space(
+				const struct dc_crtc_timing *dc_crtc_timing)
+{
+	enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+	switch (dc_crtc_timing->pixel_encoding)	{
+	case PIXEL_ENCODING_YCBCR422:
+	case PIXEL_ENCODING_YCBCR444:
+	case PIXEL_ENCODING_YCBCR420:
+	{
+		/*
+		 * 27030 kHz is the separation point between HDTV and SDTV;
+		 * according to the HDMI spec, we use YCbCr709 above it and
+		 * YCbCr601 below it, respectively.
+		 */
+		if (dc_crtc_timing->pix_clk_khz > 27030) {
+			if (dc_crtc_timing->flags.Y_ONLY)
+				color_space =
+					COLOR_SPACE_YCBCR709_LIMITED;
+			else
+				color_space = COLOR_SPACE_YCBCR709;
+		} else {
+			if (dc_crtc_timing->flags.Y_ONLY)
+				color_space =
+					COLOR_SPACE_YCBCR601_LIMITED;
+			else
+				color_space = COLOR_SPACE_YCBCR601;
+		}
+
+	}
+	break;
+	case PIXEL_ENCODING_RGB:
+		color_space = COLOR_SPACE_SRGB;
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return color_space;
+}
+
+/*****************************************************************************/
+
+static void fill_stream_properties_from_drm_display_mode(
+	struct dc_stream *stream,
+	const struct drm_display_mode *mode_in,
+	const struct drm_connector *connector)
+{
+	struct dc_crtc_timing *timing_out = &stream->timing;
+
+	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+	timing_out->h_border_left = 0;
+	timing_out->h_border_right = 0;
+	timing_out->v_border_top = 0;
+	timing_out->v_border_bottom = 0;
+	/* TODO: un-hardcode */
+
+	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+	else
+		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+	timing_out->display_color_depth = convert_color_depth_from_display_info(
+			connector);
+	timing_out->scan_type = SCANNING_TYPE_NODATA;
+	timing_out->hdmi_vic = 0;
+	timing_out->vic = drm_match_cea_mode(mode_in);
+
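+	/* derive porches and sync widths from the DRM mode; e.g. for CEA
+	 * 1920x1080@60 (hdisplay 1920, hsync_start 2008, hsync_end 2052,
+	 * htotal 2200): h_front_porch = 2008 - 1920 = 88 and
+	 * h_sync_width = 2052 - 2008 = 44 */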
+	timing_out->h_addressable = mode_in->crtc_hdisplay;
+	timing_out->h_total = mode_in->crtc_htotal;
+	timing_out->h_sync_width =
+		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+	timing_out->h_front_porch =
+		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+	timing_out->v_total = mode_in->crtc_vtotal;
+	timing_out->v_addressable = mode_in->crtc_vdisplay;
+	timing_out->v_front_porch =
+		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+	timing_out->v_sync_width =
+		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+	timing_out->pix_clk_khz = mode_in->crtc_clock;
+	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+
+	stream->output_color_space = get_output_color_space(timing_out);
+}
+
+static void fill_audio_info(
+	struct audio_info *audio_info,
+	const struct drm_connector *drm_connector,
+	const struct dc_sink *dc_sink)
+{
+	int i = 0;
+	int cea_revision = 0;
+	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+	audio_info->manufacture_id = edid_caps->manufacturer_id;
+	audio_info->product_id = edid_caps->product_id;
+
+	cea_revision = drm_connector->display_info.cea_rev;
+
+	while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
+		edid_caps->display_name[i]) {
+		audio_info->display_name[i] = edid_caps->display_name[i];
+		i++;
+	}
+
+	if (cea_revision >= 3) {
+		audio_info->mode_count = edid_caps->audio_mode_count;
+
+		for (i = 0; i < audio_info->mode_count; ++i) {
+			audio_info->modes[i].format_code =
+					(enum audio_format_code)
+					(edid_caps->audio_modes[i].format_code);
+			audio_info->modes[i].channel_count =
+					edid_caps->audio_modes[i].channel_count;
+			audio_info->modes[i].sample_rates.all =
+					edid_caps->audio_modes[i].sample_rate;
+			audio_info->modes[i].sample_size =
+					edid_caps->audio_modes[i].sample_size;
+		}
+	}
+
+	audio_info->flags.all = edid_caps->speaker_flags;
+
+	/* TODO: We only check for the progressive mode, check for interlace mode too */
+	if (drm_connector->latency_present[0]) {
+		audio_info->video_latency = drm_connector->video_latency[0];
+		audio_info->audio_latency = drm_connector->audio_latency[0];
+	}
+
+	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+}
+
+static void copy_crtc_timing_for_drm_display_mode(
+		const struct drm_display_mode *src_mode,
+		struct drm_display_mode *dst_mode)
+{
+	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+	dst_mode->crtc_clock = src_mode->crtc_clock;
+	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+	dst_mode->crtc_htotal = src_mode->crtc_htotal;
+	dst_mode->crtc_hskew = src_mode->crtc_hskew;
+	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void decide_crtc_timing_for_drm_display_mode(
+		struct drm_display_mode *drm_mode,
+		const struct drm_display_mode *native_mode,
+		bool scale_enabled)
+{
+	if (scale_enabled) {
+		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+	} else if (native_mode->clock == drm_mode->clock &&
+			native_mode->htotal == drm_mode->htotal &&
+			native_mode->vtotal == drm_mode->vtotal) {
+		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+	} else {
+		/* no scaling and no amdgpu-inserted timing change, nothing to patch */
+	}
+}
+
+static struct dc_target *create_target_for_sink(
+		const struct amdgpu_connector *aconnector,
+		const struct drm_display_mode *drm_mode,
+		const struct dm_connector_state *dm_state)
+{
+	struct drm_display_mode *preferred_mode = NULL;
+	const struct drm_connector *drm_connector;
+	struct dc_target *target = NULL;
+	struct dc_stream *stream;
+	struct drm_display_mode mode = *drm_mode;
+	bool native_mode_found = false;
+
+	if (NULL == aconnector) {
+		DRM_ERROR("aconnector is NULL!\n");
+		goto drm_connector_null;
+	}
+
+	if (NULL == dm_state) {
+		DRM_ERROR("dm_state is NULL!\n");
+		goto dm_state_null;
+	}
+
+	drm_connector = &aconnector->base;
+	stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+	if (NULL == stream) {
+		DRM_ERROR("Failed to create stream for sink!\n");
+		goto stream_create_fail;
+	}
+
+	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+		/* Search for preferred mode */
+		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+			native_mode_found = true;
+			break;
+		}
+	}
+	if (!native_mode_found)
+		preferred_mode = list_first_entry_or_null(
+				&aconnector->base.modes,
+				struct drm_display_mode,
+				head);
+
+	if (NULL == preferred_mode) {
+		/* This may not be an error, the use case is when we have no
+		 * usermode calls to reset and set mode upon hotplug. In this
+		 * case, we call set mode ourselves to restore the previous mode
+		 * and the mode list may not be filled in yet.
+		 */
+		DRM_INFO("No preferred mode found\n");
+	} else {
+		decide_crtc_timing_for_drm_display_mode(
+				&mode, preferred_mode,
+				dm_state->scaling != RMX_OFF);
+	}
+
+	fill_stream_properties_from_drm_display_mode(stream,
+			&mode, &aconnector->base);
+	update_stream_scaling_settings(&mode, dm_state, stream);
+
+	fill_audio_info(
+		&stream->audio_info,
+		drm_connector,
+		aconnector->dc_sink);
+
+	target = dc_create_target_for_streams(&stream, 1);
+	dc_stream_release(stream);
+
+	if (NULL == target) {
+		DRM_ERROR("Failed to create target with streams!\n");
+		goto target_create_fail;
+	}
+
+dm_state_null:
+drm_connector_null:
+target_create_fail:
+stream_create_fail:
+	return target;
+}
+
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+/* Only the options currently available for the driver are implemented */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+	.reset = drm_atomic_helper_crtc_reset,
+	.cursor_set = dm_crtc_cursor_set,
+	.cursor_move = dm_crtc_cursor_move,
+	.destroy = amdgpu_dm_crtc_destroy,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+	bool connected;
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+	/* Notes:
+	 * 1. This interface is NOT called in the context of the HPD irq.
+	 * 2. This interface *is* called in the context of a user-mode ioctl,
+	 * which makes it a bad place for *any* MST-related activity. */
+
+	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+		connected = (aconnector->dc_sink != NULL);
+	else
+		connected = (aconnector->base.force == DRM_FORCE_ON);
+
+	return (connected ? connector_status_connected :
+			connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(
+	struct drm_connector *connector,
+	struct drm_connector_state *connector_state,
+	struct drm_property *property,
+	uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dm_connector_state *dm_old_state =
+		to_dm_connector_state(connector->state);
+	struct dm_connector_state *dm_new_state =
+		to_dm_connector_state(connector_state);
+
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc *crtc;
+	int i;
+	int ret = -EINVAL;
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		enum amdgpu_rmx_type rmx_type;
+
+		switch (val) {
+		case DRM_MODE_SCALE_CENTER:
+			rmx_type = RMX_CENTER;
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			rmx_type = RMX_ASPECT;
+			break;
+		case DRM_MODE_SCALE_FULLSCREEN:
+			rmx_type = RMX_FULL;
+			break;
+		case DRM_MODE_SCALE_NONE:
+		default:
+			rmx_type = RMX_OFF;
+			break;
+		}
+
+		if (dm_old_state->scaling == rmx_type)
+			return 0;
+
+		dm_new_state->scaling = rmx_type;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_hborder_property) {
+		dm_new_state->underscan_hborder = val;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_vborder_property) {
+		dm_new_state->underscan_vborder = val;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_property) {
+		dm_new_state->underscan_enable = val;
+		ret = 0;
+	}
+
+	for_each_crtc_in_state(
+		connector_state->state,
+		crtc,
+		new_crtc_state,
+		i) {
+
+		if (crtc == connector_state->crtc) {
+			struct drm_plane_state *plane_state;
+
+			/*
+			 * Bit of magic done here. We need to ensure
+			 * that planes get updated after the mode is set.
+			 * So we add the primary plane to the state; this
+			 * way atomic_update will be called for it.
+			 */
+			plane_state =
+				drm_atomic_get_plane_state(
+					connector_state->state,
+					crtc->primary);
+
+			if (IS_ERR(plane_state))
+				return PTR_ERR(plane_state);
+		}
+	}
+
+	return ret;
+}
+
+void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+	const struct dc_link *link = aconnector->dc_link;
+	struct amdgpu_device *adev = connector->dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+		amdgpu_dm_register_backlight_device(dm);
+
+		if (dm->backlight_dev) {
+			backlight_device_unregister(dm->backlight_dev);
+			dm->backlight_dev = NULL;
+		}
+
+	}
+#endif
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+	struct dm_connector_state *state =
+		to_dm_connector_state(connector->state);
+
+	kfree(state);
+	connector->state = NULL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (state) {
+		state->scaling = RMX_OFF;
+		state->underscan_enable = false;
+		state->underscan_hborder = 0;
+		state->underscan_vborder = 0;
+
+		connector->state = &state->base;
+		connector->state->connector = connector;
+	}
+}
+
+struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
+	struct drm_connector *connector)
+{
+	struct dm_connector_state *state =
+		to_dm_connector_state(connector->state);
+
+	struct dm_connector_state *new_state =
+			kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+	if (new_state) {
+		__drm_atomic_helper_connector_duplicate_state(connector,
+							      &new_state->base);
+		return &new_state->base;
+	}
+
+	return NULL;
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+	.reset = amdgpu_dm_connector_funcs_reset,
+	.detect = amdgpu_dm_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = amdgpu_dm_connector_destroy,
+	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_set_property = amdgpu_dm_connector_atomic_set_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	DRM_DEBUG_KMS("Finding the best encoder\n");
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj) {
+			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+			return NULL;
+		}
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	DRM_ERROR("No encoder id\n");
+	return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+	return amdgpu_dm_connector_get_modes(connector);
+}
+
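+/*
+ * Create an emulated sink from the connector's EDID property blob so a
+ * forced connector can operate without a physically detected sink.
+ */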
+static void create_eml_sink(struct amdgpu_connector *aconnector)
+{
+	struct dc_sink_init_data init_params = {
+			.link = aconnector->dc_link,
+			.sink_signal = SIGNAL_TYPE_VIRTUAL
+	};
+	struct edid *edid;
+
+	if (!aconnector->base.edid_blob_ptr ||
+		!aconnector->base.edid_blob_ptr->data) {
+		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
+				aconnector->base.name);
+
+		aconnector->base.force = DRM_FORCE_OFF;
+		aconnector->base.override_edid = false;
+		return;
+	}
+
+	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
+	aconnector->edid = edid;
+
+	aconnector->dc_em_sink = dc_link_add_remote_sink(
+		aconnector->dc_link,
+		(uint8_t *)edid,
+		(edid->extensions + 1) * EDID_LENGTH,
+		&init_params);
+
+	if (aconnector->base.force == DRM_FORCE_ON)
+		aconnector->dc_sink = aconnector->dc_link->local_sink ?
+				aconnector->dc_link->local_sink :
+				aconnector->dc_em_sink;
+}
+
+static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
+{
+	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+	/* In case of headless boot with force on for DP managed connector
+	 * Those settings have to be != 0 to get initial modeset
+	 */
+	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+	}
+
+	aconnector->base.override_edid = true;
+	create_eml_sink(aconnector);
+}
+
+int amdgpu_dm_connector_mode_valid(
+		struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	int result = MODE_ERROR;
+	const struct dc_sink *dc_sink;
+	struct amdgpu_device *adev = connector->dev->dev_private;
+	struct dc_validation_set val_set = { 0 };
+	/* TODO: Unhardcode stream count */
+	struct dc_stream *streams[1];
+	struct dc_target *target;
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
+		return result;
+
+	/* Only run this the first time mode_valid is called to initialize
+	 * EDID management
+	 */
+	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+		!aconnector->dc_em_sink)
+		handle_edid_mgmt(aconnector);
+
+	dc_sink = to_amdgpu_connector(connector)->dc_sink;
+
+	if (NULL == dc_sink) {
+		DRM_ERROR("dc_sink is NULL!\n");
+		goto stream_create_fail;
+	}
+
+	streams[0] = dc_create_stream_for_sink(dc_sink);
+
+	if (NULL == streams[0]) {
+		DRM_ERROR("Failed to create stream for sink!\n");
+		goto stream_create_fail;
+	}
+
+	drm_mode_set_crtcinfo(mode, 0);
+	fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);
+
+	target = dc_create_target_for_streams(streams, 1);
+	val_set.target = target;
+
+	if (NULL == val_set.target) {
+		DRM_ERROR("Failed to create target with stream!\n");
+		goto target_create_fail;
+	}
+
+	val_set.surface_count = 0;
+	streams[0]->src.width = mode->hdisplay;
+	streams[0]->src.height = mode->vdisplay;
+	streams[0]->dst = streams[0]->src;
+
+	if (dc_validate_resources(adev->dm.dc, &val_set, 1))
+		result = MODE_OK;
+
+	dc_target_release(target);
+target_create_fail:
+	dc_stream_release(streams[0]);
+stream_create_fail:
+	/* TODO: error handling */
+	return result;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+	/*
+	 * If a second, larger display is hotplugged in fbcon mode, the larger
+	 * resolution modes are filtered out by drm_mode_validate_size() and
+	 * end up missing after the user starts lightdm. We therefore need to
+	 * renew the mode list in the get_modes callback instead of just
+	 * returning the mode count.
+	 */
+	.get_modes = get_modes,
+	.mode_valid = amdgpu_dm_connector_mode_valid,
+	.best_encoder = best_encoder
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int dm_crtc_helper_atomic_check(
+	struct drm_crtc *crtc,
+	struct drm_crtc_state *state)
+{
+	return 0;
+}
+
+static bool dm_crtc_helper_mode_fixup(
+	struct drm_crtc *crtc,
+	const struct drm_display_mode *mode,
+	struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+	.disable = dm_crtc_helper_disable,
+	.atomic_check = dm_crtc_helper_atomic_check,
+	.mode_fixup = dm_crtc_helper_mode_fixup
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+}
+
+static int dm_encoder_helper_atomic_check(
+	struct drm_encoder *encoder,
+	struct drm_crtc_state *crtc_state,
+	struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+	.disable = dm_encoder_helper_disable,
+	.atomic_check = dm_encoder_helper_atomic_check
+};
+
+static const struct drm_plane_funcs dm_plane_funcs = {
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state
+};
+
+static void clear_unrelated_fields(struct drm_plane_state *state)
+{
+	state->crtc = NULL;
+	state->fb = NULL;
+	state->state = NULL;
+	state->fence = NULL;
+}
+
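+/*
+ * Decide whether a commit can be handled as a page flip: both the old and
+ * new plane state must carry a framebuffer, an event must be requested, and
+ * apart from the fields cleared by clear_unrelated_fields() the two states
+ * must be identical. If @commit_surface_required is set, a tiling change
+ * rules the flip out. DRM_MODE_PAGE_FLIP_ASYNC forces a flip regardless.
+ */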
+static bool page_flip_needed(
+	const struct drm_plane_state *new_state,
+	const struct drm_plane_state *old_state,
+	struct drm_pending_vblank_event *event,
+	bool commit_surface_required)
+{
+	struct drm_plane_state old_state_tmp;
+	struct drm_plane_state new_state_tmp;
+
+	struct amdgpu_framebuffer *amdgpu_fb_old;
+	struct amdgpu_framebuffer *amdgpu_fb_new;
+	struct amdgpu_crtc *acrtc_new;
+
+	uint64_t old_tiling_flags;
+	uint64_t new_tiling_flags;
+
+	bool page_flip_required;
+
+	if (!old_state)
+		return false;
+
+	if (!old_state->fb)
+		return false;
+
+	if (!new_state)
+		return false;
+
+	if (!new_state->fb)
+		return false;
+
+	old_state_tmp = *old_state;
+	new_state_tmp = *new_state;
+
+	if (!event)
+		return false;
+
+	amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb);
+	amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb);
+
+	if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL))
+		return false;
+
+	if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL))
+		return false;
+
+	if (commit_surface_required &&
+	    old_tiling_flags != new_tiling_flags)
+		return false;
+
+	clear_unrelated_fields(&old_state_tmp);
+	clear_unrelated_fields(&new_state_tmp);
+
+	page_flip_required = memcmp(&old_state_tmp,
+				    &new_state_tmp,
+				    sizeof(old_state_tmp)) == 0;
+	if (new_state->crtc && !page_flip_required) {
+		acrtc_new = to_amdgpu_crtc(new_state->crtc);
+		if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+			page_flip_required = true;
+	}
+	return page_flip_required;
+}
+
+static int dm_plane_helper_prepare_fb(
+	struct drm_plane *plane,
+	struct drm_plane_state *new_state)
+{
+	struct amdgpu_framebuffer *afb;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *rbo;
+	int r;
+
+	if (!new_state->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	afb = to_amdgpu_framebuffer(new_state->fb);
+
+	obj = afb->obj;
+	rbo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
+
+	amdgpu_bo_unreserve(rbo);
+
+	if (unlikely(r != 0)) {
+		DRM_ERROR("Failed to pin framebuffer\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static void dm_plane_helper_cleanup_fb(
+	struct drm_plane *plane,
+	struct drm_plane_state *old_state)
+{
+	struct amdgpu_bo *rbo;
+	struct amdgpu_framebuffer *afb;
+	int r;
+
+	if (!old_state->fb)
+		return;
+
+	afb = to_amdgpu_framebuffer(old_state->fb);
+	rbo = gem_to_amdgpu_bo(afb->obj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(r)) {
+		DRM_ERROR("failed to reserve rbo before unpin\n");
+		return;
+	}
+
+	amdgpu_bo_unpin(rbo);
+	amdgpu_bo_unreserve(rbo);
+}
+
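+/*
+ * Build a dc_validation_set entry (target plus source/destination sizing)
+ * for @mode on @connector so it can be passed to dc_validate_resources().
+ * Returns MODE_OK on success and MODE_ERROR otherwise.
+ */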
+int dm_create_validation_set_for_target(struct drm_connector *connector,
+		struct drm_display_mode *mode, struct dc_validation_set *val_set)
+{
+	int result = MODE_ERROR;
+	const struct dc_sink *dc_sink =
+			to_amdgpu_connector(connector)->dc_sink;
+	/* TODO: Unhardcode stream count */
+	struct dc_stream *streams[1];
+	struct dc_target *target;
+
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
+		return result;
+
+	if (NULL == dc_sink) {
+		DRM_ERROR("dc_sink is NULL!\n");
+		return result;
+	}
+
+	streams[0] = dc_create_stream_for_sink(dc_sink);
+
+	if (NULL == streams[0]) {
+		DRM_ERROR("Failed to create stream for sink!\n");
+		return result;
+	}
+
+	drm_mode_set_crtcinfo(mode, 0);
+
+	fill_stream_properties_from_drm_display_mode(streams[0], mode, connector);
+
+	target = dc_create_target_for_streams(streams, 1);
+	val_set->target = target;
+
+	if (NULL == val_set->target) {
+		DRM_ERROR("Failed to create target with stream!\n");
+		goto fail;
+	}
+
+	streams[0]->src.width = mode->hdisplay;
+	streams[0]->src.height = mode->vdisplay;
+	streams[0]->dst = streams[0]->src;
+
+	return MODE_OK;
+
+fail:
+	dc_stream_release(streams[0]);
+	return result;
+}
+
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+	.prepare_fb = dm_plane_helper_prepare_fb,
+	.cleanup_fb = dm_plane_helper_cleanup_fb,
+};
+
+/*
+ * TODO: these are currently initialized to rgb formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so the
+ * internal drm check will succeed, and let DC implement the proper check
+ */
+static uint32_t rgb_formats[] = {
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_RGBA4444,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
+};
+
+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+			struct amdgpu_crtc *acrtc,
+			uint32_t crtc_index)
+{
+	int res = -ENOMEM;
+
+	struct drm_plane *primary_plane =
+		kzalloc(sizeof(*primary_plane), GFP_KERNEL);
+
+	if (!primary_plane)
+		goto fail_plane;
+
+	primary_plane->format_default = true;
+
+	res = drm_universal_plane_init(
+		dm->adev->ddev,
+		primary_plane,
+		0,
+		&dm_plane_funcs,
+		rgb_formats,
+		ARRAY_SIZE(rgb_formats),
+		NULL,
+		DRM_PLANE_TYPE_PRIMARY, NULL);
+
+	if (res)
+		goto fail;
+
+	primary_plane->crtc = &acrtc->base;
+
+	drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);
+
+	res = drm_crtc_init_with_planes(
+			dm->ddev,
+			&acrtc->base,
+			primary_plane,
+			NULL,
+			&amdgpu_dm_crtc_funcs, NULL);
+
+	if (res)
+		goto fail;
+
+	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+	acrtc->max_cursor_width = 128;
+	acrtc->max_cursor_height = 128;
+
+	acrtc->crtc_id = crtc_index;
+	acrtc->base.enabled = false;
+
+	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+	return 0;
+fail:
+	kfree(primary_plane);
+fail_plane:
+	acrtc->crtc_id = -1;
+	return res;
+}
+
+static int to_drm_connector_type(enum signal_type st)
+{
+	switch (st) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		return DRM_MODE_CONNECTOR_HDMIA;
+	case SIGNAL_TYPE_EDP:
+		return DRM_MODE_CONNECTOR_eDP;
+	case SIGNAL_TYPE_RGB:
+		return DRM_MODE_CONNECTOR_VGA;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		return DRM_MODE_CONNECTOR_DisplayPort;
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+		return DRM_MODE_CONNECTOR_DVID;
+	case SIGNAL_TYPE_VIRTUAL:
+		return DRM_MODE_CONNECTOR_VIRTUAL;
+
+	default:
+		return DRM_MODE_CONNECTOR_Unknown;
+	}
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+	const struct drm_connector_helper_funcs *helper =
+		connector->helper_private;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	encoder = helper->best_encoder(connector);
+
+	if (encoder == NULL)
+		return;
+
+	amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	amdgpu_encoder->native_mode.clock = 0;
+
+	if (!list_empty(&connector->probed_modes)) {
+		struct drm_display_mode *preferred_mode = NULL;
+		list_for_each_entry(preferred_mode,
+				&connector->probed_modes,
+				head) {
+			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+				amdgpu_encoder->native_mode = *preferred_mode;
+			break;
+		}
+	}
+}
+
+static struct drm_display_mode *amdgpu_dm_create_common_mode(
+		struct drm_encoder *encoder, char *name,
+		int hdisplay, int vdisplay)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+	mode = drm_mode_duplicate(dev, native_mode);
+
+	if (mode == NULL)
+		return NULL;
+
+	mode->hdisplay = hdisplay;
+	mode->vdisplay = vdisplay;
+	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+	return mode;
+}
+
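+/*
+ * Add a list of common modes, derived from the native mode's timing, that
+ * are smaller than the native mode and not already in the probed list.
+ */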
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+					struct drm_connector *connector)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+	struct amdgpu_connector *amdgpu_connector =
+				to_amdgpu_connector(connector);
+	int i;
+	int n;
+	struct mode_size {
+		char name[DRM_DISPLAY_MODE_LEN];
+		int w;
+		int h;
+	} common_modes[] = {
+		{  "640x480",  640,  480},
+		{  "800x600",  800,  600},
+		{ "1024x768", 1024,  768},
+		{ "1280x720", 1280,  720},
+		{ "1280x800", 1280,  800},
+		{"1280x1024", 1280, 1024},
+		{ "1440x900", 1440,  900},
+		{"1680x1050", 1680, 1050},
+		{"1600x1200", 1600, 1200},
+		{"1920x1080", 1920, 1080},
+		{"1920x1200", 1920, 1200}
+	};
+
+	n = ARRAY_SIZE(common_modes);
+
+	for (i = 0; i < n; i++) {
+		struct drm_display_mode *curmode = NULL;
+		bool mode_existed = false;
+
+		if (common_modes[i].w > native_mode->hdisplay ||
+			common_modes[i].h > native_mode->vdisplay ||
+			(common_modes[i].w == native_mode->hdisplay &&
+			common_modes[i].h == native_mode->vdisplay))
+				continue;
+
+		list_for_each_entry(curmode, &connector->probed_modes, head) {
+			if (common_modes[i].w == curmode->hdisplay &&
+				common_modes[i].h == curmode->vdisplay) {
+				mode_existed = true;
+				break;
+			}
+		}
+
+		if (mode_existed)
+			continue;
+
+		mode = amdgpu_dm_create_common_mode(encoder,
+				common_modes[i].name, common_modes[i].w,
+				common_modes[i].h);
+		if (!mode)
+			continue;
+
+		drm_mode_probed_add(connector, mode);
+		amdgpu_connector->num_modes++;
+	}
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(
+	struct drm_connector *connector,
+	struct edid *edid)
+{
+	struct amdgpu_connector *amdgpu_connector =
+			to_amdgpu_connector(connector);
+
+	if (edid) {
+		/* empty probed_modes */
+		INIT_LIST_HEAD(&connector->probed_modes);
+		amdgpu_connector->num_modes =
+				drm_add_edid_modes(connector, edid);
+
+		drm_edid_to_eld(connector, edid);
+
+		amdgpu_dm_get_native_mode(connector);
+	} else {
+		amdgpu_connector->num_modes = 0;
+	}
+}
+
+int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+	const struct drm_connector_helper_funcs *helper =
+			connector->helper_private;
+	struct amdgpu_connector *amdgpu_connector =
+			to_amdgpu_connector(connector);
+	struct drm_encoder *encoder;
+	struct edid *edid = amdgpu_connector->edid;
+
+	encoder = helper->best_encoder(connector);
+
+	amdgpu_dm_connector_ddc_get_modes(connector, edid);
+	amdgpu_dm_connector_add_common_modes(encoder, connector);
+	return amdgpu_connector->num_modes;
+}
+
+void amdgpu_dm_connector_init_helper(
+	struct amdgpu_display_manager *dm,
+	struct amdgpu_connector *aconnector,
+	int connector_type,
+	const struct dc_link *link,
+	int link_index)
+{
+	struct amdgpu_device *adev = dm->ddev->dev_private;
+
+	aconnector->connector_id = link_index;
+	aconnector->dc_link = link;
+	aconnector->base.interlace_allowed = true;
+	aconnector->base.doublescan_allowed = true;
+	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+	mutex_init(&aconnector->hpd_lock);
+
+	/*
+	 * Configure HPD support. connector->polled defaults to 0, which
+	 * means HPD hot plug is not supported.
+	 */
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_DVID:
+		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	default:
+		break;
+	}
+
+	drm_object_attach_property(&aconnector->base.base,
+				dm->ddev->mode_config.scaling_mode_property,
+				DRM_MODE_SCALE_NONE);
+
+	drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.underscan_property,
+				UNDERSCAN_OFF);
+	drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.underscan_hborder_property,
+				0);
+	drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.underscan_vborder_property,
+				0);
+}
+
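+/*
+ * Translate an i2c_msg array into a dc i2c_command and submit it over the
+ * DDC line belonging to this adapter's link. Note that I2C_M_RD marks a
+ * read transaction, so the payload's write flag is its negation.
+ */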
+int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+		      struct i2c_msg *msgs, int num)
+{
+	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+	struct i2c_command cmd;
+	int i;
+	int result = -EIO;
+
+	cmd.payloads = kzalloc(num * sizeof(struct i2c_payload), GFP_KERNEL);
+
+	if (!cmd.payloads)
+		return result;
+
+	cmd.number_of_payloads = num;
+	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+	cmd.speed = 100;
+
+	for (i = 0; i < num; i++) {
+		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+		cmd.payloads[i].address = msgs[i].addr;
+		cmd.payloads[i].length = msgs[i].len;
+		cmd.payloads[i].data = msgs[i].buf;
+	}
+
+	if (dc_submit_i2c(i2c->dm->dc, i2c->link_index, &cmd))
+		result = num;
+
+	kfree(cmd.payloads);
+
+	return result;
+}
+
+u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+	.master_xfer = amdgpu_dm_i2c_xfer,
+	.functionality = amdgpu_dm_i2c_func,
+};
+
+struct amdgpu_i2c_adapter *create_i2c(
+		unsigned int link_index,
+		struct amdgpu_display_manager *dm,
+		int *res)
+{
+	struct amdgpu_i2c_adapter *i2c;
+
+	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+	if (!i2c)
+		return NULL;
+	i2c->dm = dm;
+	i2c->base.owner = THIS_MODULE;
+	i2c->base.class = I2C_CLASS_DDC;
+	i2c->base.dev.parent = &dm->adev->pdev->dev;
+	i2c->base.algo = &amdgpu_dm_i2c_algo;
+	snprintf(i2c->base.name, sizeof(i2c->base.name),
+			"AMDGPU DM i2c hw bus %d", link_index);
+	i2c->link_index = link_index;
+	i2c_set_adapdata(&i2c->base, i2c);
+
+	return i2c;
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector. */
+int amdgpu_dm_connector_init(
+	struct amdgpu_display_manager *dm,
+	struct amdgpu_connector *aconnector,
+	uint32_t link_index,
+	struct amdgpu_encoder *aencoder)
+{
+	int res = 0;
+	int connector_type;
+	struct dc *dc = dm->dc;
+	const struct dc_link *link = dc_get_link_at_index(dc, link_index);
+	struct amdgpu_i2c_adapter *i2c;
+
+	DRM_DEBUG_KMS("%s()\n", __func__);
+
+	i2c = create_i2c(link->link_index, dm, &res);
+	if (!i2c) {
+		DRM_ERROR("Failed to create i2c adapter data\n");
+		return -ENOMEM;
+	}
+
+	aconnector->i2c = i2c;
+	res = i2c_add_adapter(&i2c->base);
+
+	if (res) {
+		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+		goto out_free;
+	}
+
+	connector_type = to_drm_connector_type(link->connector_signal);
+
+	res = drm_connector_init(
+			dm->ddev,
+			&aconnector->base,
+			&amdgpu_dm_connector_funcs,
+			connector_type);
+
+	if (res) {
+		DRM_ERROR("connector_init failed\n");
+		aconnector->connector_id = -1;
+		goto out_free;
+	}
+
+	drm_connector_helper_add(
+			&aconnector->base,
+			&amdgpu_dm_connector_helper_funcs);
+
+	amdgpu_dm_connector_init_helper(
+		dm,
+		aconnector,
+		connector_type,
+		link,
+		link_index);
+
+	drm_mode_connector_attach_encoder(
+		&aconnector->base, &aencoder->base);
+
+	drm_connector_register(&aconnector->base);
+
+	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+		|| connector_type == DRM_MODE_CONNECTOR_eDP)
+		amdgpu_dm_initialize_mst_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+	/* NOTE: this currently will create backlight device even if a panel
+	 * is not connected to the eDP/LVDS connector.
+	 *
+	 * This is less than ideal but we don't have sink information at this
+	 * stage since detection happens after. We can't do detection earlier
+	 * since MST detection needs connectors to be created first.
+	 */
+	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+		/* Even if registration failed, we should continue with
+		 * DM initialization because not having a backlight control
+		 * is better than a black screen. */
+		amdgpu_dm_register_backlight_device(dm);
+
+		if (dm->backlight_dev)
+			dm->backlight_link = link;
+	}
+#endif
+
+out_free:
+	if (res) {
+		kfree(i2c);
+		aconnector->i2c = NULL;
+	}
+	return res;
+}
+
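+/*
+ * Every DM encoder may drive any crtc, so the possible_crtcs mask is just
+ * the lowest num_crtc bits set (up to the 6 crtcs supported here).
+ */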
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+	switch (adev->mode_info.num_crtc) {
+	case 1:
+		return 0x1;
+	case 2:
+		return 0x3;
+	case 3:
+		return 0x7;
+	case 4:
+		return 0xf;
+	case 5:
+		return 0x1f;
+	case 6:
+	default:
+		return 0x3f;
+	}
+}
+
+int amdgpu_dm_encoder_init(
+	struct drm_device *dev,
+	struct amdgpu_encoder *aencoder,
+	uint32_t link_index)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	int res = drm_encoder_init(dev,
+				   &aencoder->base,
+				   &amdgpu_dm_encoder_funcs,
+				   DRM_MODE_ENCODER_TMDS,
+				   NULL);
+
+	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+	if (!res)
+		aencoder->encoder_id = link_index;
+	else
+		aencoder->encoder_id = -1;
+
+	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+	return res;
+}
+
+enum dm_commit_action {
+	DM_COMMIT_ACTION_NOTHING,
+	DM_COMMIT_ACTION_RESET,
+	DM_COMMIT_ACTION_DPMS_ON,
+	DM_COMMIT_ACTION_DPMS_OFF,
+	DM_COMMIT_ACTION_SET
+};
+
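+/*
+ * Map a crtc state to the action the commit has to take: a full mode set,
+ * a reset, a DPMS on/off transition, or nothing.
+ */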
+static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
+{
+	/* mode_changed means either the mode itself or 'enable' changed */
+	/* active_changed means a dpms change */
+
+	DRM_DEBUG_KMS("crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+			state->enable,
+			state->active,
+			state->planes_changed,
+			state->mode_changed,
+			state->active_changed,
+			state->connectors_changed);
+
+	if (state->mode_changed) {
+		/* if it got disabled, reset the mode */
+		if (!state->enable)
+			return DM_COMMIT_ACTION_RESET;
+
+		if (state->active)
+			return DM_COMMIT_ACTION_SET;
+		else
+			return DM_COMMIT_ACTION_RESET;
+	} else {
+		/* ! mode_changed */
+
+		/* if it remains disabled, skip it */
+		if (!state->enable)
+			return DM_COMMIT_ACTION_NOTHING;
+
+		if (state->active && state->connectors_changed)
+			return DM_COMMIT_ACTION_SET;
+
+		if (state->active_changed) {
+			if (state->active) {
+				return DM_COMMIT_ACTION_DPMS_ON;
+			} else {
+				return DM_COMMIT_ACTION_DPMS_OFF;
+			}
+		} else {
+			/* ! active_changed */
+			return DM_COMMIT_ACTION_NOTHING;
+		}
+	}
+}
+
+
+typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
+
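+/*
+ * Poll in 1 ms steps until the predicate @f stops holding for @acrtc,
+ * warning after 1000 iterations since that indicates a probable hang.
+ */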
+static void wait_while_pflip_status(struct amdgpu_device *adev,
+		struct amdgpu_crtc *acrtc, predicate f)
+{
+	int count = 0;
+	while (f(acrtc)) {
+		/* Spin Wait*/
+		msleep(1);
+		count++;
+		if (count == 1000) {
+			DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
+										__func__, acrtc->crtc_id,
+										acrtc,
+										acrtc->pflip_status);
+
+			/* We do not expect to hit this case except on Polaris
+			 * with PHY PLL:
+			 * 1. DP to HDMI passive dongle connected
+			 * 2. unplug (headless)
+			 * 3. plug in DP
+			 * 3a. on plug in, DP will try to verify the link by
+			 * training; training disables the PHY PLL that HDMI
+			 * relies on to drive the TG
+			 * 3b. as a result the flip interrupt cannot be
+			 * generated and we exit when the timeout expires.
+			 * However we have no code to clean up the flip; flip
+			 * cleanup will happen when the address is written
+			 * with the restore mode change
+			 */
+			WARN_ON(1);
+			break;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d \n",
+											__func__,
+											count,
+											acrtc->crtc_id,
+											acrtc,
+											acrtc->pflip_status);
+}
+
+static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
+{
+	return acrtc->pflip_status != AMDGPU_FLIP_NONE;
+}
+
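+/*
+ * Enable or disable the vblank/pageflip interrupts for a crtc. On disable,
+ * wait for any in-progress page flip to finish first so no flip completion
+ * is lost.
+ */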
+static void manage_dm_interrupts(
+	struct amdgpu_device *adev,
+	struct amdgpu_crtc *acrtc,
+	bool enable)
+{
+	/*
+	 * This is not a correct translation, but it works as long as the
+	 * VBLANK constant is the same as PFLIP's.
+	 */
+	int irq_type =
+		amdgpu_crtc_idx_to_irq_type(
+			adev,
+			acrtc->crtc_id);
+
+	if (enable) {
+		drm_crtc_vblank_on(&acrtc->base);
+		amdgpu_irq_get(
+			adev,
+			&adev->pageflip_irq,
+			irq_type);
+	} else {
+		wait_while_pflip_status(adev, acrtc,
+				pflip_in_progress_predicate);
+
+		amdgpu_irq_put(
+			adev,
+			&adev->pageflip_irq,
+			irq_type);
+		drm_crtc_vblank_off(&acrtc->base);
+	}
+}
+
+
+static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
+{
+	return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
+}
+
+static bool is_scaling_state_different(
+		const struct dm_connector_state *dm_state,
+		const struct dm_connector_state *old_dm_state)
+{
+	if (dm_state->scaling != old_dm_state->scaling)
+		return true;
+	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+			return true;
+	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+			return true;
+	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder
+				|| dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+			return true;
+	return false;
+}
+
+static void remove_target(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
+{
+	int i;
+
+	/*
+	 * Disable vblank and pflip interrupts on the crtc
+	 * that is about to change.
+	 */
+	manage_dm_interrupts(adev, acrtc, false);
+	/* this is the update mode case */
+	if (adev->dm.freesync_module)
+		for (i = 0; i < acrtc->target->stream_count; i++)
+			mod_freesync_remove_stream(
+					adev->dm.freesync_module,
+					acrtc->target->streams[i]);
+	dc_target_release(acrtc->target);
+	acrtc->target = NULL;
+	acrtc->otg_inst = -1;
+	acrtc->enabled = false;
+}
+
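+/*
+ * Commit an atomic state to hardware: pin new framebuffers and prepare
+ * page flips, swap in the new state, build the updated target list and
+ * hand it to dc_commit_targets(), commit surfaces, re-enable interrupts
+ * on newly set crtcs and finally submit the prepared flips.
+ */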
+int amdgpu_dm_atomic_commit(
+	struct drm_device *dev,
+	struct drm_atomic_state *state,
+	bool async)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_plane *plane;
+	struct drm_plane_state *new_plane_state;
+	struct drm_plane_state *old_plane_state;
+	uint32_t i, j;
+	int32_t ret = 0;
+	uint32_t commit_targets_count = 0;
+	uint32_t new_crtcs_count = 0;
+	uint32_t flip_crtcs_count = 0;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+
+	struct dc_target *commit_targets[MAX_TARGETS];
+	struct amdgpu_crtc *new_crtcs[MAX_TARGETS];
+	struct dc_target *new_target;
+	struct drm_crtc *flip_crtcs[MAX_TARGETS];
+	struct amdgpu_flip_work *work[MAX_TARGETS] = {0};
+	struct amdgpu_bo *new_abo[MAX_TARGETS] = {0};
+
+	/* In this step all new framebuffers get pinned */
+
+	/*
+	 * TODO: Revisit when we support true asynchronous commit.
+	 * Right now we receive async commit only from pageflip, in which case
+	 * we should not pin/unpin the fb here, it should be done in
+	 * amdgpu_crtc_flip and from the vblank irq handler.
+	 */
+	if (!async) {
+		ret = drm_atomic_helper_prepare_planes(dev, state);
+		if (ret)
+			return ret;
+	}
+
+	/* Page flip if needed */
+	for_each_plane_in_state(state, plane, new_plane_state, i) {
+		struct drm_plane_state *old_plane_state = plane->state;
+		struct drm_crtc *crtc = new_plane_state->crtc;
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+		struct drm_framebuffer *fb = new_plane_state->fb;
+		struct drm_crtc_state *crtc_state;
+
+		if (!fb || !crtc)
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+
+		if (!crtc_state->planes_changed || !crtc_state->active)
+			continue;
+
+		if (page_flip_needed(
+				new_plane_state,
+				old_plane_state,
+				crtc_state->event,
+				false)) {
+			ret = amdgpu_crtc_prepare_flip(crtc,
+							fb,
+							crtc_state->event,
+							acrtc->flip_flags,
+							drm_crtc_vblank_count(crtc),
+							&work[flip_crtcs_count],
+							&new_abo[flip_crtcs_count]);
+
+			if (ret) {
+				/* According to atomic_commit hook API, EINVAL is not allowed */
+				if (unlikely(ret == -EINVAL))
+					ret = -ENOMEM;
+
+				DRM_ERROR("Atomic commit: Flip for  crtc id %d: [%p], "
+									"failed, errno = %d\n",
+									acrtc->crtc_id,
+									acrtc,
+									ret);
+				/* cleanup all flip configurations which
+				 * succeeded in this commit
+				 */
+				for (i = 0; i < flip_crtcs_count; i++)
+					amdgpu_crtc_cleanup_flip_ctx(
+							work[i],
+							new_abo[i]);
+
+				return ret;
+			}
+
+			flip_crtcs[flip_crtcs_count] = crtc;
+			flip_crtcs_count++;
+		}
+	}
+
+	/*
+	 * This is the point of no return - everything below never fails except
+	 * when the hw goes bonghits. Which means we can commit the new state on
+	 * the software side now.
+	 */
+
+	drm_atomic_helper_swap_state(state, true);
+
+	/*
+	 * From this point on, 'state' really is the old state. The new state
+	 * has been swapped into the corresponding objects and can be accessed
+	 * from there.
+	 */
+
+	/*
+	 * there is no fences usage yet in state. We can skip the following line
+	 * wait_for_fences(dev, state);
+	 */
+
+	drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+	/* update changed items */
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		struct amdgpu_crtc *acrtc;
+		struct amdgpu_connector *aconnector = NULL;
+		enum dm_commit_action action;
+		struct drm_crtc_state *new_state = crtc->state;
+
+		acrtc = to_amdgpu_crtc(crtc);
+
+		aconnector =
+			amdgpu_dm_find_first_crct_matching_connector(
+				state,
+				crtc,
+				false);
+
+		/* handles headless hotplug case, updating new_state and
+		 * aconnector as needed
+		 */
+
+		action = get_dm_commit_action(new_state);
+
+		switch (action) {
+		case DM_COMMIT_ACTION_DPMS_ON:
+		case DM_COMMIT_ACTION_SET: {
+			struct dm_connector_state *dm_state = NULL;
+			new_target = NULL;
+
+			if (aconnector)
+				dm_state = to_dm_connector_state(aconnector->base.state);
+
+			new_target = create_target_for_sink(
+					aconnector,
+					&crtc->state->mode,
+					dm_state);
+
+			DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+			if (!new_target) {
+				/*
+				 * This could happen because of issues with
+				 * userspace notification delivery. In this
+				 * case userspace tries to set a mode on a
+				 * display that is in fact disconnected, so
+				 * dc_sink on the aconnector is NULL.
+				 * We expect a reset mode to come soon.
+				 *
+				 * This can also happen when an unplug is done
+				 * during the resume sequence.
+				 *
+				 * In these cases we want to pretend we still
+				 * have a sink to keep the pipe running so that
+				 * hw state is consistent with the sw state.
+				 */
+				DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
+						__func__, acrtc->base.base.id);
+				break;
+			}
+
+			if (acrtc->target)
+				remove_target(adev, acrtc);
+
+			/*
+			 * This saves the set-mode crtcs: we need to enable
+			 * vblanks on them once all resources are acquired
+			 * in dc after dc_commit_targets
+			 */
+			new_crtcs[new_crtcs_count] = acrtc;
+			new_crtcs_count++;
+
+			acrtc->target = new_target;
+			acrtc->enabled = true;
+			acrtc->hw_mode = crtc->state->mode;
+			crtc->hwmode = crtc->state->mode;
+
+			break;
+		}
+
+		case DM_COMMIT_ACTION_NOTHING: {
+			struct dm_connector_state *dm_state = NULL;
+
+			if (!aconnector)
+				break;
+
+			dm_state = to_dm_connector_state(aconnector->base.state);
+
+			/* Scaling update */
+			update_stream_scaling_settings(
+					&crtc->state->mode,
+					dm_state,
+					acrtc->target->streams[0]);
+
+			break;
+		}
+		case DM_COMMIT_ACTION_DPMS_OFF:
+		case DM_COMMIT_ACTION_RESET:
+			DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+			/* i.e. reset mode */
+			if (acrtc->target)
+				remove_target(adev, acrtc);
+			break;
+		} /* switch() */
+	} /* for_each_crtc_in_state() */
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		if (acrtc->target) {
+			commit_targets[commit_targets_count] = acrtc->target;
+			++commit_targets_count;
+		}
+	}
+
+	/*
+	 * Add streams to the freesync module only after the streams of new
+	 * and replaced targets have been removed from it
+	 */
+	if (adev->dm.freesync_module) {
+		for (i = 0; i < new_crtcs_count; i++) {
+			struct amdgpu_connector *aconnector = NULL;
+			new_target = new_crtcs[i]->target;
+			aconnector =
+				amdgpu_dm_find_first_crct_matching_connector(
+					state,
+					&new_crtcs[i]->base,
+					false);
+			if (!aconnector) {
+				DRM_INFO(
+						"Atomic commit: Failed to find connector for acrtc id:%d "
+						"skipping freesync init\n",
+						new_crtcs[i]->crtc_id);
+				continue;
+			}
+
+			for (j = 0; j < new_target->stream_count; j++)
+				mod_freesync_add_stream(
+						adev->dm.freesync_module,
+						new_target->streams[j], &aconnector->caps);
+		}
+	}
+
+	/* DC is optimized not to do anything if 'targets' didn't change. */
+	dc_commit_targets(dm->dc, commit_targets, commit_targets_count);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		if (acrtc->target != NULL)
+			acrtc->otg_inst =
+				dc_target_get_status(acrtc->target)->primary_otg_inst;
+	}
+
+	/* update planes when needed */
+	for_each_plane_in_state(state, plane, old_plane_state, i) {
+		struct drm_plane_state *plane_state = plane->state;
+		struct drm_crtc *crtc = plane_state->crtc;
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+		struct drm_framebuffer *fb = plane_state->fb;
+		struct drm_connector *connector;
+		struct dm_connector_state *dm_state = NULL;
+		enum dm_commit_action action;
+
+		if (!fb || !crtc || !crtc->state->active)
+			continue;
+
+		action = get_dm_commit_action(crtc->state);
+
+		/* Surfaces are created under two scenarios:
+		 * 1. This commit is not a page flip.
+		 * 2. This commit is a page flip, and targets are created.
+		 */
+		if (!page_flip_needed(
+				plane_state,
+				old_plane_state,
+				crtc->state->event, true) ||
+				action == DM_COMMIT_ACTION_DPMS_ON ||
+				action == DM_COMMIT_ACTION_SET) {
+			list_for_each_entry(connector,
+				&dev->mode_config.connector_list, head)	{
+				if (connector->state->crtc == crtc) {
+					dm_state = to_dm_connector_state(
+						connector->state);
+					break;
+				}
+			}
+
+			/*
+			 * This situation happens in the following case:
+			 * we are about to set a mode on a connector whose
+			 * only possible crtc (in the encoder crtc mask) is
+			 * used by another connector; DRM will then try to
+			 * re-assign crtcs to make the configuration
+			 * supported. For our implementation we need to make
+			 * all encoders support all crtcs, then this issue
+			 * will never arise again. This check is left to
+			 * guard the code against it.
+			 *
+			 * It should also be needed when used with the actual
+			 * drm_atomic_commit ioctl in the future
+			 */
+			if (!dm_state)
+				continue;
+
+			/*
+			 * If a flip is pending here (i.e. still waiting for
+			 * the fence to return before the address is
+			 * submitted), we cannot commit the surface, as
+			 * commit_surface would prematurely write out the
+			 * future address. Wait until the flip is submitted
+			 * before proceeding.
+			 */
+			wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
+
+			dm_dc_surface_commit(dm->dc, crtc);
+		}
+	}
+
+	for (i = 0; i < new_crtcs_count; i++) {
+		/*
+		 * loop to enable interrupts on newly arrived crtc
+		 */
+		struct amdgpu_crtc *acrtc = new_crtcs[i];
+
+		if (adev->dm.freesync_module) {
+			for (j = 0; j < acrtc->target->stream_count; j++)
+				mod_freesync_notify_mode_change(
+						adev->dm.freesync_module,
+						acrtc->target->streams,
+						acrtc->target->stream_count);
+		}
+
+		manage_dm_interrupts(adev, acrtc, true);
+		dm_crtc_cursor_reset(&acrtc->base);
+	}
+
+	/* Do actual flip */
+	flip_crtcs_count = 0;
+	for_each_plane_in_state(state, plane, old_plane_state, i) {
+		struct drm_plane_state *plane_state = plane->state;
+		struct drm_crtc *crtc = plane_state->crtc;
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+		struct drm_framebuffer *fb = plane_state->fb;
+
+		if (!fb || !crtc || !crtc->state->planes_changed ||
+			!crtc->state->active)
+			continue;
+
+		if (page_flip_needed(
+				plane_state,
+				old_plane_state,
+				crtc->state->event,
+				false)) {
+			amdgpu_crtc_submit_flip(
+					crtc,
+					fb,
+					work[flip_crtcs_count],
+					new_abo[flip_crtcs_count]);
+			flip_crtcs_count++;
+
+			/* clean up the flags for next usage */
+			acrtc->flip_flags = 0;
+		}
+	}
+
+	/* At this point all old framebuffers are unpinned */
+
+	/* TODO: Revisit when we support true asynchronous commit.*/
+	if (!async)
+		drm_atomic_helper_cleanup_planes(dev, state);
+
+	drm_atomic_state_put(state);
+
+	return ret;
+}
+/*
+ * This function handles all cases where a set mode does not come upon
+ * hotplug: when the same display is unplugged and then plugged back into the
+ * same port, and when we are running without usermode desktop manager
+ * support
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
+{
+	struct drm_crtc *crtc;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+	struct amdgpu_crtc *disconnected_acrtc;
+	const struct dc_sink *sink;
+	struct dc_target *commit_targets[MAX_TARGETS];
+	struct dc_target *current_target;
+	uint32_t commit_targets_count = 0;
+	int i;
+
+	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+		return;
+
+	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+
+	if (!disconnected_acrtc || !disconnected_acrtc->target)
+		return;
+
+	sink = disconnected_acrtc->target->streams[0]->sink;
+
+	/*
+	 * If the previous sink is not released and different from the current
+	 * one, we deduce we are in a state where we cannot rely on a usermode
+	 * call to turn on the display, so we do it here
+	 */
+	if (sink != aconnector->dc_sink) {
+		struct dm_connector_state *dm_state =
+				to_dm_connector_state(aconnector->base.state);
+
+		struct dc_target *new_target =
+			create_target_for_sink(
+				aconnector,
+				&disconnected_acrtc->base.state->mode,
+				dm_state);
+
+		DRM_INFO("Headless hotplug, restoring connector state\n");
+		/*
+		 * Disable vblank and pflip interrupts on the crtc
+		 * that is about to change
+		 */
+		manage_dm_interrupts(adev, disconnected_acrtc, false);
+		/* this is the update mode case */
+
+		current_target = disconnected_acrtc->target;
+
+		disconnected_acrtc->target = new_target;
+		disconnected_acrtc->enabled = true;
+		disconnected_acrtc->hw_mode = disconnected_acrtc->base.state->mode;
+
+		commit_targets_count = 0;
+
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+			if (acrtc->target) {
+				commit_targets[commit_targets_count] = acrtc->target;
+				++commit_targets_count;
+			}
+		}
+
+		/* DC is optimized not to do anything if 'targets' didn't change. */
+		if (!dc_commit_targets(dc, commit_targets,
+				commit_targets_count)) {
+			DRM_INFO("Failed to restore connector state!\n");
+			dc_target_release(disconnected_acrtc->target);
+			disconnected_acrtc->target = current_target;
+			manage_dm_interrupts(adev, disconnected_acrtc, true);
+			return;
+		}
+
+		if (adev->dm.freesync_module) {
+
+			for (i = 0; i < current_target->stream_count; i++)
+				mod_freesync_remove_stream(
+						adev->dm.freesync_module,
+						current_target->streams[i]);
+
+			for (i = 0; i < new_target->stream_count; i++)
+				mod_freesync_add_stream(
+						adev->dm.freesync_module,
+						new_target->streams[i],
+						&aconnector->caps);
+		}
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+			if (acrtc->target != NULL) {
+				acrtc->otg_inst =
+					dc_target_get_status(acrtc->target)->primary_otg_inst;
+			}
+		}
+
+		dc_target_release(current_target);
+
+		dm_dc_surface_commit(dc, &disconnected_acrtc->base);
+
+		manage_dm_interrupts(adev, disconnected_acrtc, true);
+		dm_crtc_cursor_reset(&disconnected_acrtc->base);
+
+	}
+}
+
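+/*
+ * Helpers for maintaining the dc_validation_set array used by atomic_check:
+ * attach a surface to the entry of a given target, swap a target in place
+ * (or append it), and remove a target while compacting the array.
+ */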
+static uint32_t add_val_sets_surface(
+	struct dc_validation_set *val_sets,
+	uint32_t set_count,
+	const struct dc_target *target,
+	const struct dc_surface *surface)
+{
+	uint32_t i = 0;
+
+	while (i < set_count) {
+		if (val_sets[i].target == target)
+			break;
+		++i;
+	}
+
+	if (i == set_count)
+		/* target not found: nothing to attach the surface to */
+		return 0;
+
+	val_sets[i].surfaces[val_sets[i].surface_count] = surface;
+	val_sets[i].surface_count++;
+
+	return val_sets[i].surface_count;
+}
+
+static uint32_t update_in_val_sets_target(
+	struct dc_validation_set *val_sets,
+	struct drm_crtc **crtcs,
+	uint32_t set_count,
+	const struct dc_target *old_target,
+	const struct dc_target *new_target,
+	struct drm_crtc *crtc)
+{
+	uint32_t i = 0;
+
+	while (i < set_count) {
+		if (val_sets[i].target == old_target)
+			break;
+		++i;
+	}
+
+	val_sets[i].target = new_target;
+	crtcs[i] = crtc;
+
+	if (i == set_count) {
+		/* nothing found. add new one to the end */
+		return set_count + 1;
+	}
+
+	return set_count;
+}
+
+static uint32_t remove_from_val_sets(
+	struct dc_validation_set *val_sets,
+	uint32_t set_count,
+	const struct dc_target *target)
+{
+	int i;
+
+	for (i = 0; i < set_count; i++)
+		if (val_sets[i].target == target)
+			break;
+
+	if (i == set_count) {
+		/* nothing found */
+		return set_count;
+	}
+
+	set_count--;
+
+	for (; i < set_count; i++) {
+		val_sets[i] = val_sets[i + 1];
+	}
+
+	return set_count;
+}
+
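+/*
+ * Validate an atomic state against dc: run the generic drm helpers first,
+ * then rebuild the would-be set of targets and surfaces and ask
+ * dc_validate_resources() whether the configuration is supported.
+ */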
+int amdgpu_dm_atomic_check(struct drm_device *dev,
+			struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
+	int i, j;
+	int ret;
+	int set_count;
+	int new_target_count;
+	struct dc_validation_set set[MAX_TARGETS] = {{ 0 }};
+	struct dc_target *new_targets[MAX_TARGETS] = { 0 };
+	struct drm_crtc *crtc_set[MAX_TARGETS] = { 0 };
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	bool need_to_validate = false;
+
+	ret = drm_atomic_helper_check(dev, state);
+
+	if (ret) {
+		DRM_ERROR("Atomic state validation failed with error :%d !\n",
+				ret);
+		return ret;
+	}
+
+	ret = -EINVAL;
+
+	/* copy existing configuration */
+	new_target_count = 0;
+	set_count = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		if (acrtc->target) {
+			set[set_count].target = acrtc->target;
+			crtc_set[set_count] = crtc;
+			++set_count;
+		}
+	}
+
+	/* update changed items */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct amdgpu_crtc *acrtc = NULL;
+		struct amdgpu_connector *aconnector = NULL;
+		enum dm_commit_action action;
+
+		acrtc = to_amdgpu_crtc(crtc);
+
+		aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
+
+		action = get_dm_commit_action(crtc_state);
+
+		switch (action) {
+		case DM_COMMIT_ACTION_DPMS_ON:
+		case DM_COMMIT_ACTION_SET: {
+			struct dc_target *new_target = NULL;
+			struct drm_connector_state *conn_state = NULL;
+			struct dm_connector_state *dm_state = NULL;
+
+			if (aconnector) {
+				conn_state = drm_atomic_get_connector_state(state, &aconnector->base);
+				if (IS_ERR(conn_state))
+					return ret;
+				dm_state = to_dm_connector_state(conn_state);
+			}
+
+			new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
+
+			/*
+			 * We can have no target on ACTION_SET if a display
+			 * was disconnected during S3. In this case it is not
+			 * an error: the OS will be updated after detection
+			 * and will do the right thing on the next atomic
+			 * commit
+			 */
+			if (!new_target) {
+				DRM_DEBUG_KMS("%s: Failed to create new target for crtc %d\n",
+						__func__, acrtc->base.base.id);
+				break;
+			}
+
+			new_targets[new_target_count] = new_target;
+			set_count = update_in_val_sets_target(
+					set,
+					crtc_set,
+					set_count,
+					acrtc->target,
+					new_target,
+					crtc);
+
+			new_target_count++;
+			need_to_validate = true;
+			break;
+		}
+
+		case DM_COMMIT_ACTION_NOTHING: {
+			const struct drm_connector *drm_connector = NULL;
+			struct drm_connector_state *conn_state = NULL;
+			struct dm_connector_state *dm_state = NULL;
+			struct dm_connector_state *old_dm_state = NULL;
+			struct dc_target *new_target;
+
+			if (!aconnector)
+				break;
+
+			for_each_connector_in_state(
+				state, drm_connector, conn_state, j) {
+				if (&aconnector->base == drm_connector)
+					break;
+			}
+
+			old_dm_state = to_dm_connector_state(drm_connector->state);
+			dm_state = to_dm_connector_state(conn_state);
+
+			/* Support underscan adjustment*/
+			if (!is_scaling_state_different(dm_state, old_dm_state))
+				break;
+
+			new_target = create_target_for_sink(aconnector, &crtc_state->mode, dm_state);
+
+			if (!new_target) {
+				DRM_ERROR("%s: Failed to create new target for crtc %d\n",
+						__func__, acrtc->base.base.id);
+				break;
+			}
+
+			new_targets[new_target_count] = new_target;
+			set_count = update_in_val_sets_target(
+					set,
+					crtc_set,
+					set_count,
+					acrtc->target,
+					new_target,
+					crtc);
+
+			new_target_count++;
+			need_to_validate = true;
+
+			break;
+		}
+		case DM_COMMIT_ACTION_DPMS_OFF:
+		case DM_COMMIT_ACTION_RESET:
+			/* i.e. reset mode */
+			if (acrtc->target) {
+				set_count = remove_from_val_sets(
+						set,
+						set_count,
+						acrtc->target);
+			}
+			break;
+		}
+
+		/*
+		 * TODO revisit when removing commit action
+		 * and looking at atomic flags directly
+		 */
+
+		/* commit needs planes right now (e.g. for gamma) */
+		/* TODO rework commit to check crtc for gamma change */
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < set_count; i++) {
+		for_each_plane_in_state(state, plane, plane_state, j) {
+			struct drm_plane_state *old_plane_state = plane->state;
+			struct drm_crtc *crtc = plane_state->crtc;
+			struct drm_framebuffer *fb = plane_state->fb;
+			struct drm_connector *connector;
+			struct dm_connector_state *dm_state = NULL;
+			enum dm_commit_action action;
+			struct drm_crtc_state *crtc_state;
+
+			if (!fb || !crtc || crtc_set[i] != crtc ||
+				!crtc->state->planes_changed || !crtc->state->active)
+				continue;
+
+			action = get_dm_commit_action(crtc->state);
+
+			/* Surfaces are created under two scenarios:
+			 * 1. This commit is not a page flip.
+			 * 2. This commit is a page flip, and targets are created.
+			 */
+			crtc_state = drm_atomic_get_crtc_state(state, crtc);
+			if (!page_flip_needed(plane_state, old_plane_state,
+					crtc_state->event, true) ||
+					action == DM_COMMIT_ACTION_DPMS_ON ||
+					action == DM_COMMIT_ACTION_SET) {
+				struct dc_surface *surface;
+
+				list_for_each_entry(connector,
+					&dev->mode_config.connector_list, head)	{
+					if (connector->state->crtc == crtc) {
+						dm_state = to_dm_connector_state(
+							connector->state);
+						break;
+					}
+				}
+
+				/*
+				 * This situation happens in the following
+				 * case: we are about to set a mode on a
+				 * connector whose only possible crtc (in the
+				 * encoder crtc mask) is used by another
+				 * connector; DRM will then try to re-assign
+				 * crtcs to make the configuration supported.
+				 * For our implementation we need to make all
+				 * encoders support all crtcs, then this issue
+				 * will never arise again. This check is left
+				 * to guard the code against it.
+				 *
+				 * It should also be needed when used with the
+				 * actual drm_atomic_commit ioctl in the future
+				 */
+				if (!dm_state)
+					continue;
+
+				surface = dc_create_surface(dc);
+				fill_plane_attributes(
+					surface,
+					plane_state,
+					false);
+
+				add_val_sets_surface(
+							set,
+							set_count,
+							set[i].target,
+							surface);
+
+				need_to_validate = true;
+			}
+		}
+	}
+
+	if (!need_to_validate || set_count == 0 ||
+		dc_validate_resources(dc, set, set_count))
+		ret = 0;
+
+	for (i = 0; i < set_count; i++) {
+		for (j = 0; j < set[i].surface_count; j++) {
+			dc_surface_release(set[i].surfaces[j]);
+		}
+	}
+	for (i = 0; i < new_target_count; i++)
+		dc_target_release(new_targets[i]);
+
+	if (ret != 0)
+		DRM_ERROR("Atomic check failed.\n");
+
+	return ret;
+}
+
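+/*
+ * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether
+ * the DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink can work
+ * without MSA timing parameters; used below to gate the freesync EDID check.
+ */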
+static bool is_dp_capable_without_timing_msa(
+		struct dc *dc,
+		struct amdgpu_connector *amdgpu_connector)
+{
+	uint8_t dpcd_data;
+	bool capable = false;
+
+	if (amdgpu_connector->dc_link &&
+	    dc_read_dpcd(dc, amdgpu_connector->dc_link->link_index,
+			 DP_DOWN_STREAM_PORT_COUNT,
+			 &dpcd_data, sizeof(dpcd_data)))
+		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
+
+	return capable;
+}
+
+void amdgpu_dm_add_sink_to_freesync_module(
+		struct drm_connector *connector,
+		struct edid *edid)
+{
+	int i;
+	uint64_t val_capable;
+	bool edid_check_required;
+	struct detailed_timing *timing;
+	struct detailed_non_pixel *data;
+	struct detailed_data_monitor_range *range;
+	struct amdgpu_connector *amdgpu_connector =
+			to_amdgpu_connector(connector);
+
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	edid_check_required = false;
+	if (!amdgpu_connector->dc_sink) {
+		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+		return;
+	}
+	if (!adev->dm.freesync_module)
+		return;
+	/*
+	 * If the EDID is non-NULL, restrict freesync to DP and eDP sinks
+	 */
+	if (edid) {
+		if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+			|| amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+			edid_check_required = is_dp_capable_without_timing_msa(
+						adev->dm.dc,
+						amdgpu_connector);
+		}
+	}
+	val_capable = 0;
+	if (edid_check_required && (edid->version > 1 ||
+	   (edid->version == 1 && edid->revision > 1))) {
+		for (i = 0; i < 4; i++) {
+
+			timing	= &edid->detailed_timings[i];
+			data	= &timing->data.other_data;
+			range	= &data->data.range;
+			/*
+			 * Check if monitor has continuous frequency mode
+			 */
+			if (data->type != EDID_DETAIL_MONITOR_RANGE)
+				continue;
+			/*
+			 * Check for flag range limits only. If flag == 1 then
+			 * no additional timing information provided.
+			 * Default GTF, GTF Secondary curve and CVT are not
+			 * supported
+			 */
+			if (range->flags != 1)
+				continue;
+
+			amdgpu_connector->min_vfreq = range->min_vfreq;
+			amdgpu_connector->max_vfreq = range->max_vfreq;
+			amdgpu_connector->pixel_clock_mhz =
+				range->pixel_clock_mhz * 10;
+			break;
+		}
+
+		if (amdgpu_connector->max_vfreq -
+				amdgpu_connector->min_vfreq > 10) {
+			amdgpu_connector->caps.supported = true;
+			amdgpu_connector->caps.min_refresh_in_micro_hz =
+					amdgpu_connector->min_vfreq * 1000000;
+			amdgpu_connector->caps.max_refresh_in_micro_hz =
+					amdgpu_connector->max_vfreq * 1000000;
+			val_capable = 1;
+		}
+	}
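+	/*
+	 * As a worked example: a monitor range descriptor advertising
+	 * 40-60 Hz yields caps of 40,000,000-60,000,000 uHz and passes
+	 * the >10 Hz span check above.
+	 */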
+
+	/*
+	 * TODO figure out how to notify user-mode or DRM of freesync caps
+	 * once we figure out how to deal with freesync in an upstreamable
+	 * fashion
+	 */
+}
+
+void amdgpu_dm_remove_sink_from_freesync_module(
+		struct drm_connector *connector)
+{
+	/*
+	 * TODO fill in once we figure out how to deal with freesync in
+	 * an upstreamable fashion
+	 */
+}

+ 101 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.h

@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012-13 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_TYPES_H__
+#define __AMDGPU_DM_TYPES_H__
+
+#include <drm/drmP.h>
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_surface;
+
+/* TODO Jodan Hersen: use the one in amdgpu_dm */
+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+			struct amdgpu_crtc *amdgpu_crtc,
+			uint32_t link_index);
+int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+			struct amdgpu_connector *amdgpu_connector,
+			uint32_t link_index,
+			struct amdgpu_encoder *amdgpu_encoder);
+int amdgpu_dm_encoder_init(
+	struct drm_device *dev,
+	struct amdgpu_encoder *aencoder,
+	uint32_t link_index);
+
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
+void amdgpu_dm_connector_destroy(struct drm_connector *connector);
+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
+
+int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+int amdgpu_dm_atomic_commit(
+	struct drm_device *dev,
+	struct drm_atomic_state *state,
+	bool async);
+int amdgpu_dm_atomic_check(struct drm_device *dev,
+				struct drm_atomic_state *state);
+
+int dm_create_validation_set_for_target(
+	struct drm_connector *connector,
+	struct drm_display_mode *mode,
+	struct dc_validation_set *val_set);
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
+	struct drm_connector *connector);
+
+int amdgpu_dm_connector_atomic_set_property(
+	struct drm_connector *connector,
+	struct drm_connector_state *state,
+	struct drm_property *property,
+	uint64_t val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(
+	struct amdgpu_display_manager *dm,
+	struct amdgpu_connector *aconnector,
+	int connector_type,
+	const struct dc_link *link,
+	int link_index);
+
+int amdgpu_dm_connector_mode_valid(
+	struct drm_connector *connector,
+	struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+				    struct drm_connector *connector);
+
+void amdgpu_dm_add_sink_to_freesync_module(
+		struct drm_connector *connector,
+		struct edid *edid);
+
+void amdgpu_dm_remove_sink_from_freesync_module(
+		struct drm_connector *connector);
+
+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+
+#endif		/* __AMDGPU_DM_TYPES_H__ */

+ 28 - 0
drivers/gpu/drm/amd/display/dc/Makefile

@@ -0,0 +1,28 @@
+#
+# Makefile for Display Core (dc) component.
+#
+
+DC_LIBS = basics bios calcs dce \
+gpio gpu i2caux irq virtual
+
+DC_LIBS += dce112
+DC_LIBS += dce110
+DC_LIBS += dce100
+DC_LIBS += dce80
+
+AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
+
+include $(AMD_DC)
+
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_target.o dc_sink.o dc_stream.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o
+
+AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
+
+AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
+
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
+AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)

+ 11 - 0
drivers/gpu/drm/amd/display/dc/basics/Makefile

@@ -0,0 +1,11 @@
+#
+# Makefile for the 'basics' sub-component of DAL.
+# It provides the general basic services required by other DAL
+# subcomponents.
+
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+	logger.o log_helpers.o register_logger.o signal_types.o vector.o
+
+AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)

+ 223 - 0
drivers/gpu/drm/amd/display/dc/basics/conversion.c

@@ -0,0 +1,223 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#define DIVIDER 10000
+
+/* S2D13 values are clamped to the range [-3.00, 3.00] */
+#define S2D13_MIN (-3 * DIVIDER)
+#define S2D13_MAX (3 * DIVIDER)
+
+uint16_t fixed_point_to_int_frac(
+	struct fixed31_32 arg,
+	uint8_t integer_bits,
+	uint8_t fractional_bits)
+{
+	int32_t numerator;
+	int32_t divisor = 1 << fractional_bits;
+
+	uint16_t result;
+
+	uint16_t d = (uint16_t)dal_fixed31_32_floor(
+		dal_fixed31_32_abs(
+			arg));
+
+	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+		numerator = (uint16_t)dal_fixed31_32_floor(
+			dal_fixed31_32_mul_int(
+				arg,
+				divisor));
+	else {
+		numerator = dal_fixed31_32_floor(
+			dal_fixed31_32_sub(
+				dal_fixed31_32_from_int(
+					1LL << integer_bits),
+				dal_fixed31_32_recip(
+					dal_fixed31_32_from_int(
+						divisor))));
+	}
+
+	if (numerator >= 0)
+		result = (uint16_t)numerator;
+	else
+		result = (uint16_t)(
+		(1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+	if ((result != 0) && dal_fixed31_32_lt(
+		arg, dal_fixed31_32_zero))
+		result |= 1 << (integer_bits + fractional_bits);
+
+	return result;
+}
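+
+/*
+ * Worked example for the S2D13 case (integer_bits = 2, fractional_bits = 13):
+ * 1.0 maps to 1 << 13 = 0x2000, while -1.0 maps to 0xE000, its two's
+ * complement in 16 bits with the sign bit set.
+ */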
+/**
+ * convert_float_matrix - convert a fixed31_32 matrix into the HW register
+ * format S2D13 (sign bit, 2 integer bits, 13 fractional bits).
+ * @matrix: output buffer of S2D13-encoded register values
+ * @flt: input fixed31_32 values
+ * @buffer_size: number of elements to convert
+ */
+void convert_float_matrix(
+	uint16_t *matrix,
+	struct fixed31_32 *flt,
+	uint32_t buffer_size)
+{
+	const struct fixed31_32 min_2_13 =
+		dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
+	const struct fixed31_32 max_2_13 =
+		dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
+	uint32_t i;
+
+	for (i = 0; i < buffer_size; ++i) {
+		uint32_t reg_value =
+				fixed_point_to_int_frac(
+					dal_fixed31_32_clamp(
+						flt[i],
+						min_2_13,
+						max_2_13),
+						2,
+						13);
+
+		matrix[i] = (uint16_t)reg_value;
+	}
+}
+
+static void calculate_adjustments_common(
+	const struct fixed31_32 *ideal_matrix,
+	const struct dc_csc_adjustments *adjustments,
+	struct fixed31_32 *matrix)
+{
+	const struct fixed31_32 sin_hue =
+		dal_fixed31_32_sin(adjustments->hue);
+	const struct fixed31_32 cos_hue =
+		dal_fixed31_32_cos(adjustments->hue);
+
+	const struct fixed31_32 multiplier =
+		dal_fixed31_32_mul(
+			adjustments->contrast,
+			adjustments->saturation);
+
+	matrix[0] = dal_fixed31_32_mul(
+		ideal_matrix[0],
+		adjustments->contrast);
+
+	matrix[1] = dal_fixed31_32_mul(
+		ideal_matrix[1],
+		adjustments->contrast);
+
+	matrix[2] = dal_fixed31_32_mul(
+		ideal_matrix[2],
+		adjustments->contrast);
+
+	matrix[4] = dal_fixed31_32_mul(
+		multiplier,
+		dal_fixed31_32_add(
+			dal_fixed31_32_mul(
+				ideal_matrix[8],
+				sin_hue),
+			dal_fixed31_32_mul(
+				ideal_matrix[4],
+				cos_hue)));
+
+	matrix[5] = dal_fixed31_32_mul(
+		multiplier,
+		dal_fixed31_32_add(
+			dal_fixed31_32_mul(
+				ideal_matrix[9],
+				sin_hue),
+			dal_fixed31_32_mul(
+				ideal_matrix[5],
+				cos_hue)));
+
+	matrix[6] = dal_fixed31_32_mul(
+		multiplier,
+		dal_fixed31_32_add(
+			dal_fixed31_32_mul(
+				ideal_matrix[10],
+				sin_hue),
+			dal_fixed31_32_mul(
+				ideal_matrix[6],
+				cos_hue)));
+
+	matrix[7] = ideal_matrix[7];
+
+	matrix[8] = dal_fixed31_32_mul(
+		multiplier,
+		dal_fixed31_32_sub(
+			dal_fixed31_32_mul(
+				ideal_matrix[8],
+				cos_hue),
+			dal_fixed31_32_mul(
+				ideal_matrix[4],
+				sin_hue)));
+
+	matrix[9] = dal_fixed31_32_mul(
+		multiplier,
+		dal_fixed31_32_sub(
+			dal_fixed31_32_mul(
+				ideal_matrix[9],
+				cos_hue),
+			dal_fixed31_32_mul(
+				ideal_matrix[5],
+				sin_hue)));
+
+	matrix[10] = dal_fixed31_32_mul(
+		multiplier,
+		dal_fixed31_32_sub(
+			dal_fixed31_32_mul(
+				ideal_matrix[10],
+				cos_hue),
+			dal_fixed31_32_mul(
+				ideal_matrix[6],
+				sin_hue)));
+
+	matrix[11] = ideal_matrix[11];
+}
+
+void calculate_adjustments(
+	const struct fixed31_32 *ideal_matrix,
+	const struct dc_csc_adjustments *adjustments,
+	struct fixed31_32 *matrix)
+{
+	calculate_adjustments_common(ideal_matrix, adjustments, matrix);
+
+	matrix[3] = dal_fixed31_32_add(
+		ideal_matrix[3],
+		dal_fixed31_32_mul(
+			adjustments->brightness,
+			dal_fixed31_32_from_fraction(86, 100)));
+}
+
+void calculate_adjustments_y_only(
+	const struct fixed31_32 *ideal_matrix,
+	const struct dc_csc_adjustments *adjustments,
+	struct fixed31_32 *matrix)
+{
+	calculate_adjustments_common(ideal_matrix, adjustments, matrix);
+
+	matrix[3] = dal_fixed31_32_add(
+		ideal_matrix[3],
+		adjustments->brightness);
+}
+

+ 51 - 0
drivers/gpu/drm/amd/display/dc/basics/conversion.h

@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_CONVERSION_H__
+#define __DAL_CONVERSION_H__
+
+#include "include/fixed31_32.h"
+
+uint16_t fixed_point_to_int_frac(
+	struct fixed31_32 arg,
+	uint8_t integer_bits,
+	uint8_t fractional_bits);
+
+void convert_float_matrix(
+	uint16_t *matrix,
+	struct fixed31_32 *flt,
+	uint32_t buffer_size);
+
+void calculate_adjustments(
+	const struct fixed31_32 *ideal_matrix,
+	const struct dc_csc_adjustments *adjustments,
+	struct fixed31_32 *matrix);
+
+void calculate_adjustments_y_only(
+	const struct fixed31_32 *ideal_matrix,
+	const struct dc_csc_adjustments *adjustments,
+	struct fixed31_32 *matrix);
+
+#endif

+ 691 - 0
drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c

@@ -0,0 +1,691 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed31_32.h"
+
+static inline uint64_t abs_i64(
+	int64_t arg)
+{
+	if (arg > 0)
+		return (uint64_t)arg;
+	else
+		return (uint64_t)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline uint64_t complete_integer_division_u64(
+	uint64_t dividend,
+	uint64_t divisor,
+	uint64_t *remainder)
+{
+	uint64_t result;
+
+	ASSERT(divisor);
+
+	result = div64_u64_rem(dividend, divisor, remainder);
+
+	return result;
+}
+
+#define BITS_PER_FRACTIONAL_PART \
+	32
+
+#define FRACTIONAL_PART_MASK \
+	((1ULL << BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+	((x) >> BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+	(FRACTIONAL_PART_MASK & (x))
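+
+/*
+ * Layout example: 1.5 is stored as 0x0000000180000000, i.e. integer part 1
+ * in the upper 32 bits and fraction 0x80000000 (0.5) in the lower 32 bits.
+ */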
+
+struct fixed31_32 dal_fixed31_32_from_fraction(
+	int64_t numerator,
+	int64_t denominator)
+{
+	struct fixed31_32 res;
+
+	bool arg1_negative = numerator < 0;
+	bool arg2_negative = denominator < 0;
+
+	uint64_t arg1_value = arg1_negative ? -numerator : numerator;
+	uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+
+	uint64_t remainder;
+
+	/* determine integer part */
+
+	uint64_t res_value = complete_integer_division_u64(
+		arg1_value, arg2_value, &remainder);
+
+	ASSERT(res_value <= LONG_MAX);
+
+	/* determine fractional part */
+	{
+		uint32_t i = BITS_PER_FRACTIONAL_PART;
+
+		do {
+			remainder <<= 1;
+
+			res_value <<= 1;
+
+			if (remainder >= arg2_value) {
+				res_value |= 1;
+				remainder -= arg2_value;
+			}
+		} while (--i != 0);
+	}
+
+	/* round up LSB */
+	{
+		uint64_t summand = (remainder << 1) >= arg2_value;
+
+		ASSERT(res_value <= LLONG_MAX - summand);
+
+		res_value += summand;
+	}
+
+	res.value = (int64_t)res_value;
+
+	if (arg1_negative ^ arg2_negative)
+		res.value = -res.value;
+
+	return res;
+}
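+
+/*
+ * For instance, dal_fixed31_32_from_fraction(1, 3) yields
+ * 0x0000000055555555: 2^32 / 3 = 1431655765.33..., rounded to nearest
+ * in the last fractional bit.
+ */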
+
+struct fixed31_32 dal_fixed31_32_from_int(
+	int64_t arg)
+{
+	struct fixed31_32 res;
+
+	ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+
+	res.value = arg << BITS_PER_FRACTIONAL_PART;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_neg(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 res;
+
+	res.value = -arg.value;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_abs(
+	struct fixed31_32 arg)
+{
+	if (arg.value < 0)
+		return dal_fixed31_32_neg(arg);
+	else
+		return arg;
+}
+
+bool dal_fixed31_32_lt(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	return arg1.value < arg2.value;
+}
+
+bool dal_fixed31_32_le(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	return arg1.value <= arg2.value;
+}
+
+bool dal_fixed31_32_eq(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	return arg1.value == arg2.value;
+}
+
+struct fixed31_32 dal_fixed31_32_min(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	if (arg1.value <= arg2.value)
+		return arg1;
+	else
+		return arg2;
+}
+
+struct fixed31_32 dal_fixed31_32_max(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	if (arg1.value <= arg2.value)
+		return arg2;
+	else
+		return arg1;
+}
+
+struct fixed31_32 dal_fixed31_32_clamp(
+	struct fixed31_32 arg,
+	struct fixed31_32 min_value,
+	struct fixed31_32 max_value)
+{
+	if (dal_fixed31_32_le(arg, min_value))
+		return min_value;
+	else if (dal_fixed31_32_le(max_value, arg))
+		return max_value;
+	else
+		return arg;
+}
+
+struct fixed31_32 dal_fixed31_32_shl(
+	struct fixed31_32 arg,
+	uint8_t shift)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+		((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+	res.value = arg.value << shift;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_shr(
+	struct fixed31_32 arg,
+	uint8_t shift)
+{
+	struct fixed31_32 res;
+
+	ASSERT(shift < 64);
+
+	res.value = arg.value >> shift;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_add(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+		((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+	res.value = arg1.value + arg2.value;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sub_int(
+	struct fixed31_32 arg1,
+	int32_t arg2)
+{
+	return dal_fixed31_32_sub(
+		arg1,
+		dal_fixed31_32_from_int(arg2));
+}
+
+struct fixed31_32 dal_fixed31_32_sub(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+		((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+	res.value = arg1.value - arg2.value;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_mul_int(
+	struct fixed31_32 arg1,
+	int32_t arg2)
+{
+	return dal_fixed31_32_mul(
+		arg1,
+		dal_fixed31_32_from_int(arg2));
+}
+
+struct fixed31_32 dal_fixed31_32_mul(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	bool arg1_negative = arg1.value < 0;
+	bool arg2_negative = arg2.value < 0;
+
+	uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
+	uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+	uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+	uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+	uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+	uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+	uint64_t tmp;
+
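+	/*
+	 * Schoolbook multiply of (i1 + f1) * (i2 + f2): accumulate i1*i2,
+	 * i1*f2, i2*f1, and the f1*f2 term rounded to nearest at bit 32.
+	 */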
+	res.value = arg1_int * arg2_int;
+
+	ASSERT(res.value <= LONG_MAX);
+
+	res.value <<= BITS_PER_FRACTIONAL_PART;
+
+	tmp = arg1_int * arg2_fra;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	tmp = arg2_int * arg1_fra;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	tmp = arg1_fra * arg2_fra;
+
+	tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
+		(tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	if (arg1_negative ^ arg2_negative)
+		res.value = -res.value;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sqr(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 res;
+
+	uint64_t arg_value = abs_i64(arg.value);
+
+	uint64_t arg_int = GET_INTEGER_PART(arg_value);
+
+	uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+	uint64_t tmp;
+
+	res.value = arg_int * arg_int;
+
+	ASSERT(res.value <= LONG_MAX);
+
+	res.value <<= BITS_PER_FRACTIONAL_PART;
+
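+	/* the cross term is accumulated twice: (a + b)^2 = a^2 + 2*a*b + b^2 */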
+	tmp = arg_int * arg_fra;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	tmp = arg_fra * arg_fra;
+
+	tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
+		(tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_div_int(
+	struct fixed31_32 arg1,
+	int64_t arg2)
+{
+	return dal_fixed31_32_from_fraction(
+		arg1.value,
+		dal_fixed31_32_from_int(arg2).value);
+}
+
+struct fixed31_32 dal_fixed31_32_div(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	return dal_fixed31_32_from_fraction(
+		arg1.value,
+		arg2.value);
+}
+
+struct fixed31_32 dal_fixed31_32_recip(
+	struct fixed31_32 arg)
+{
+	/*
+	 * @note
+	 * Good idea to use Newton's method
+	 */
+
+	ASSERT(arg.value);
+
+	return dal_fixed31_32_from_fraction(
+		dal_fixed31_32_one.value,
+		arg.value);
+}
+
+struct fixed31_32 dal_fixed31_32_sinc(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 square;
+
+	struct fixed31_32 res = dal_fixed31_32_one;
+
+	int32_t n = 27;
+
+	struct fixed31_32 arg_norm = arg;
+
+	if (dal_fixed31_32_le(
+		dal_fixed31_32_two_pi,
+		dal_fixed31_32_abs(arg))) {
+		arg_norm = dal_fixed31_32_sub(
+			arg_norm,
+			dal_fixed31_32_mul_int(
+				dal_fixed31_32_two_pi,
+				(int32_t)div64_s64(
+					arg_norm.value,
+					dal_fixed31_32_two_pi.value)));
+	}
+
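+	/* arg_norm has been reduced by whole multiples of 2*pi so that the
+	 * Taylor series below converges */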
+	square = dal_fixed31_32_sqr(arg_norm);
+
+	do {
+		res = dal_fixed31_32_sub(
+			dal_fixed31_32_one,
+			dal_fixed31_32_div_int(
+				dal_fixed31_32_mul(
+					square,
+					res),
+				n * (n - 1)));
+
+		n -= 2;
+	} while (n > 2);
+
+	if (arg.value != arg_norm.value)
+		res = dal_fixed31_32_div(
+			dal_fixed31_32_mul(res, arg_norm),
+			arg);
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sin(
+	struct fixed31_32 arg)
+{
+	return dal_fixed31_32_mul(
+		arg,
+		dal_fixed31_32_sinc(arg));
+}
+
+struct fixed31_32 dal_fixed31_32_cos(
+	struct fixed31_32 arg)
+{
+	/* TODO implement argument normalization */
+
+	const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+
+	struct fixed31_32 res = dal_fixed31_32_one;
+
+	int32_t n = 26;
+
+	do {
+		res = dal_fixed31_32_sub(
+			dal_fixed31_32_one,
+			dal_fixed31_32_div_int(
+				dal_fixed31_32_mul(
+					square,
+					res),
+				n * (n - 1)));
+
+		n -= 2;
+	} while (n != 0);
+
+	return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+	struct fixed31_32 arg)
+{
+	uint32_t n = 9;
+
+	struct fixed31_32 res = dal_fixed31_32_from_fraction(
+		n + 2,
+		n + 1);
+	/* TODO find correct res */
+
+	ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+
+	do
+		res = dal_fixed31_32_add(
+			dal_fixed31_32_one,
+			dal_fixed31_32_div_int(
+				dal_fixed31_32_mul(
+					arg,
+					res),
+				n));
+	while (--n != 1);
+
+	return dal_fixed31_32_add(
+		dal_fixed31_32_one,
+		dal_fixed31_32_mul(
+			arg,
+			res));
+}
+
+struct fixed31_32 dal_fixed31_32_exp(
+	struct fixed31_32 arg)
+{
+	/*
+	 * @brief
+	 * Main equation is:
+	 * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+	 * where m = round(x / ln(2)), r = x - m * ln(2)
+	 */
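+	 *
+	 * For example, x = 3: m = round(3 / ln(2)) = 4 and
+	 * r = 3 - 4 * ln(2) ~= 0.2274, so exp(3) ~= (1 << 4) * exp(0.2274)
+	 * ~= 20.09.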
+
+	if (dal_fixed31_32_le(
+		dal_fixed31_32_ln2_div_2,
+		dal_fixed31_32_abs(arg))) {
+		int32_t m = dal_fixed31_32_round(
+			dal_fixed31_32_div(
+				arg,
+				dal_fixed31_32_ln2));
+
+		struct fixed31_32 r = dal_fixed31_32_sub(
+			arg,
+			dal_fixed31_32_mul_int(
+				dal_fixed31_32_ln2,
+				m));
+
+		ASSERT(m != 0);
+
+		ASSERT(dal_fixed31_32_lt(
+			dal_fixed31_32_abs(r),
+			dal_fixed31_32_one));
+
+		if (m > 0)
+			return dal_fixed31_32_shl(
+				fixed31_32_exp_from_taylor_series(r),
+				(uint8_t)m);
+		else
+			return dal_fixed31_32_div_int(
+				fixed31_32_exp_from_taylor_series(r),
+				1LL << -m);
+	} else if (arg.value != 0)
+		return fixed31_32_exp_from_taylor_series(arg);
+	else
+		return dal_fixed31_32_one;
+}
+
+struct fixed31_32 dal_fixed31_32_log(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+	/* TODO improve 1st estimation */
+
+	struct fixed31_32 error;
+
+	ASSERT(arg.value > 0);
+	/* TODO if arg is negative, return NaN */
+	/* TODO if arg is zero, return -INF */
+
+	do {
+		struct fixed31_32 res1 = dal_fixed31_32_add(
+			dal_fixed31_32_sub(
+				res,
+				dal_fixed31_32_one),
+			dal_fixed31_32_div(
+				arg,
+				dal_fixed31_32_exp(res)));
+
+		error = dal_fixed31_32_sub(
+			res,
+			res1);
+
+		res = res1;
+		/* TODO determine max_allowed_error based on quality of exp() */
+	} while (abs_i64(error.value) > 100ULL);
+
+	return res;
+}
+
+struct fixed31_32 dal_fixed31_32_pow(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	return dal_fixed31_32_exp(
+		dal_fixed31_32_mul(
+			dal_fixed31_32_log(arg1),
+			arg2));
+}
+
+int32_t dal_fixed31_32_floor(
+	struct fixed31_32 arg)
+{
+	uint64_t arg_value = abs_i64(arg.value);
+
+	if (arg.value >= 0)
+		return (int32_t)GET_INTEGER_PART(arg_value);
+	else
+		return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_round(
+	struct fixed31_32 arg)
+{
+	uint64_t arg_value = abs_i64(arg.value);
+
+	const int64_t summand = dal_fixed31_32_half.value;
+
+	ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+	arg_value += summand;
+
+	if (arg.value >= 0)
+		return (int32_t)GET_INTEGER_PART(arg_value);
+	else
+		return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_ceil(
+	struct fixed31_32 arg)
+{
+	uint64_t arg_value = abs_i64(arg.value);
+
+	const int64_t summand = dal_fixed31_32_one.value -
+		dal_fixed31_32_epsilon.value;
+
+	ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+	arg_value += summand;
+
+	if (arg.value >= 0)
+		return (int32_t)GET_INTEGER_PART(arg_value);
+	else
+		return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* This function is a generic helper that translates a fixed-point value into
+ * a specified integer format consisting of integer_bits of integer part and
+ * fractional_bits of fractional part. For example, dal_fixed31_32_u2d19 uses
+ * it to produce a 2-bit integer part and a 19-bit fractional part in 32 bits.
+ * It is used in hw programming (scaler).
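+ *
+ * Example: 1.75 in u2d19 becomes ((1 & 0x3) << 19) | (0.75 * 2^19) =
+ * 0x80000 | 0x60000 = 0xE0000.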
+ */
+
+static inline uint32_t ux_dy(
+	int64_t value,
+	uint32_t integer_bits,
+	uint32_t fractional_bits)
+{
+	/* 1. create mask of integer part */
+	uint32_t result = (1 << integer_bits) - 1;
+	/* 2. mask out fractional part */
+	uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+	/* 3. shrink fixed point integer part to be of integer_bits width*/
+	result &= GET_INTEGER_PART(value);
+	/* 4. make space for fractional part to be filled in after integer */
+	result <<= fractional_bits;
+	/* 5. shrink fixed point fractional part to of fractional_bits width*/
+	fractional_part >>= BITS_PER_FRACTIONAL_PART - fractional_bits;
+	/* 6. merge the result */
+	return result | fractional_part;
+}
+
+uint32_t dal_fixed31_32_u2d19(
+	struct fixed31_32 arg)
+{
+	return ux_dy(arg.value, 2, 19);
+}
+
+uint32_t dal_fixed31_32_u0d19(
+	struct fixed31_32 arg)
+{
+	return ux_dy(arg.value, 0, 19);
+}

+ 221 - 0
drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c

@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed32_32.h"
+
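+/*
+ * Compute n / d as a 32.32 fixed-point quotient: div64_u64_rem() produces
+ * the integer part, the loop develops 32 fractional bits by shift-and-subtract
+ * long division, and the final step rounds the last bit.
+ */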
+static uint64_t u64_div(uint64_t n, uint64_t d)
+{
+	uint32_t i = 0;
+	uint64_t r;
+	uint64_t q = div64_u64_rem(n, d, &r);
+
+	for (i = 0; i < 32; ++i) {
+		uint64_t sbit = q & (1ULL<<63);
+
+		r <<= 1;
+		r |= sbit ? 1 : 0;
+		q <<= 1;
+		if (r >= d) {
+			r -= d;
+			q |= 1;
+		}
+	}
+
+	if (2 * r >= d)
+		q += 1;
+	return q;
+}
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+{
+	struct fixed32_32 fx;
+
+	fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
+{
+	struct fixed32_32 fx;
+
+	fx.value = (uint64_t)value<<32;
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx = {lhs.value + rhs.value};
+
+	ASSERT(fx.value >= rhs.value);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+
+	ASSERT(fx.value >= (uint64_t)rhs << 32);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx;
+
+	ASSERT(lhs.value >= rhs.value);
+	fx.value = lhs.value - rhs.value;
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx;
+
+	ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+	fx.value = lhs.value - ((uint64_t)rhs<<32);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_mul(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx;
+	uint64_t lhs_int = lhs.value>>32;
+	uint64_t lhs_frac = (uint32_t)lhs.value;
+	uint64_t rhs_int = rhs.value>>32;
+	uint64_t rhs_frac = (uint32_t)rhs.value;
+	uint64_t ahbh = lhs_int * rhs_int;
+	uint64_t ahbl = lhs_int * rhs_frac;
+	uint64_t albh = lhs_frac * rhs_int;
+	uint64_t albl = lhs_frac * rhs_frac;
+
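+	/* combine the partial products; albl (frac * frac) contributes only
+	 * its upper 32 bits to the 32.32 result */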
+	ASSERT((ahbh>>32) == 0);
+
+	fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx;
+	uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+	uint64_t lhsf;
+
+	ASSERT((lhsi>>32) == 0);
+	lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+	ASSERT((lhsi<<32) + lhsf >= lhsf);
+	fx.value = (lhsi<<32) + lhsf;
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx;
+
+	fx.value = u64_div(lhs.value, rhs.value);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx;
+
+	fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_min(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	return (lhs.value < rhs.value) ? lhs : rhs;
+}
+
+struct fixed32_32 dal_fixed32_32_max(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	return (lhs.value > rhs.value) ? lhs : rhs;
+}
+
+bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+	return lhs.value > rhs.value;
+}
+
+bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	return lhs.value > ((uint64_t)rhs<<32);
+}
+
+bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+	return lhs.value < rhs.value;
+}
+
+bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+	return lhs.value <= rhs.value;
+}
+
+bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	return lhs.value < ((uint64_t)rhs<<32);
+}
+
+bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	return lhs.value <= ((uint64_t)rhs<<32);
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+{
+	ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+	return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+}
+
+uint32_t dal_fixed32_32_floor(struct fixed32_32 v)
+{
+	return v.value>>32;
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+{
+	ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+	return (v.value + (1ULL<<31))>>32;
+}
+
+bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+	return lhs.value == rhs.value;
+}

+ 134 - 0
drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c

@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/grph_object_id.h"
+
+bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+	bool rc = true;
+
+	switch (id.type) {
+	case OBJECT_TYPE_UNKNOWN:
+		rc = false;
+		break;
+	case OBJECT_TYPE_GPU:
+	case OBJECT_TYPE_ENGINE:
+		/* do NOT check for id.id == 0 */
+		if (id.enum_id == ENUM_ID_UNKNOWN)
+			rc = false;
+		break;
+	default:
+		if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+			rc = false;
+		break;
+	}
+
+	return rc;
+}
+
+bool dal_graphics_object_id_is_equal(
+	struct graphics_object_id id1,
+	struct graphics_object_id id2)
+{
+	if (!dal_graphics_object_id_is_valid(id1)) {
+		dm_output_to_console(
+		"%s: Warning: comparing invalid object 'id1'!\n", __func__);
+		return false;
+	}
+
+	if (!dal_graphics_object_id_is_valid(id2)) {
+		dm_output_to_console(
+		"%s: Warning: comparing invalid object 'id2'!\n", __func__);
+		return false;
+	}
+
+	return id1.id == id2.id && id1.enum_id == id2.enum_id &&
+		id1.type == id2.type;
+}
+
+/* Based on internal data members memory layout */
+uint32_t dal_graphics_object_id_to_uint(struct graphics_object_id id)
+{
+	return id.id + (id.enum_id << 0x8) + (id.type << 0xc);
+}
+
+/*
+ * ******* get specific ID - internal safe cast into specific type *******
+ */
+
+enum controller_id dal_graphics_object_id_get_controller_id(
+	struct graphics_object_id id)
+{
+	if (id.type == OBJECT_TYPE_CONTROLLER)
+		return id.id;
+	return CONTROLLER_ID_UNDEFINED;
+}
+
+enum clock_source_id dal_graphics_object_id_get_clock_source_id(
+	struct graphics_object_id id)
+{
+	if (id.type == OBJECT_TYPE_CLOCK_SOURCE)
+		return id.id;
+	return CLOCK_SOURCE_ID_UNDEFINED;
+}
+
+enum encoder_id dal_graphics_object_id_get_encoder_id(
+	struct graphics_object_id id)
+{
+	if (id.type == OBJECT_TYPE_ENCODER)
+		return id.id;
+	return ENCODER_ID_UNKNOWN;
+}
+
+enum connector_id dal_graphics_object_id_get_connector_id(
+	struct graphics_object_id id)
+{
+	if (id.type == OBJECT_TYPE_CONNECTOR)
+		return id.id;
+	return CONNECTOR_ID_UNKNOWN;
+}
+
+enum audio_id dal_graphics_object_id_get_audio_id(struct graphics_object_id id)
+{
+	if (id.type == OBJECT_TYPE_AUDIO)
+		return id.id;
+	return AUDIO_ID_UNKNOWN;
+}
+
+enum engine_id dal_graphics_object_id_get_engine_id(
+	struct graphics_object_id id)
+{
+	if (id.type == OBJECT_TYPE_ENGINE)
+		return id.id;
+	return ENGINE_ID_UNKNOWN;
+}
+

+ 100 - 0
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c

@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "logger.h"
+#include "include/logger_interface.h"
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+struct dc_signal_type_info {
+	enum signal_type type;
+	char name[MAX_NAME_LEN];
+};
+
+static const struct dc_signal_type_info signal_type_info_tbl[] = {
+		{SIGNAL_TYPE_NONE,             "NC"},
+		{SIGNAL_TYPE_DVI_SINGLE_LINK,  "DVI"},
+		{SIGNAL_TYPE_DVI_DUAL_LINK,    "DDVI"},
+		{SIGNAL_TYPE_HDMI_TYPE_A,      "HDMIA"},
+		{SIGNAL_TYPE_LVDS,             "LVDS"},
+		{SIGNAL_TYPE_RGB,              "VGA"},
+		{SIGNAL_TYPE_DISPLAY_PORT,     "DP"},
+		{SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
+		{SIGNAL_TYPE_EDP,              "eDP"},
+		{SIGNAL_TYPE_WIRELESS,         "Wireless"},
+		{SIGNAL_TYPE_VIRTUAL,          "Virtual"}
+};
+
+void dc_conn_log(struct dc_context *ctx,
+		const struct dc_link *link,
+		uint8_t *hex_data,
+		int hex_data_count,
+		enum dc_log_type event,
+		const char *msg,
+		...)
+{
+	int i;
+	va_list args;
+	struct log_entry entry = { 0 };
+	enum signal_type signal;
+
+	if (link->local_sink)
+		signal = link->local_sink->sink_signal;
+	else
+		signal = link->connector_signal;
+
+	if (link->type == dc_connection_mst_branch)
+		signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+	dm_logger_open(ctx->logger, &entry, event);
+
+	for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
+		if (signal == signal_type_info_tbl[i].type)
+			break;
+
+	dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+			signal_type_info_tbl[i].name,
+			link->link_index);
+
+	va_start(args, msg);
+	entry.buf_offset += dm_log_to_buffer(
+		&entry.buf[entry.buf_offset],
+		LOG_MAX_LINE_SIZE - entry.buf_offset,
+		msg, args);
+
+	if (entry.buf[strlen(entry.buf) - 1] == '\n') {
+		entry.buf[strlen(entry.buf) - 1] = '\0';
+		entry.buf_offset--;
+	}
+
+	if (hex_data)
+		for (i = 0; i < hex_data_count; i++)
+			dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+
+	dm_logger_append(&entry, "^\n");
+	dm_logger_close(&entry);
+	va_end(args);
+}

+ 457 - 0
drivers/gpu/drm/amd/display/dc/basics/logger.c

@@ -0,0 +1,457 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "logger.h"
+
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct dc_log_type_info log_type_info_tbl[] = {
+		{LOG_ERROR,                 "Error"},
+		{LOG_WARNING,               "Warning"},
+		{LOG_DC,                    "DC_Interface"},
+		{LOG_SURFACE,               "Surface"},
+		{LOG_HW_HOTPLUG,            "HW_Hotplug"},
+		{LOG_HW_LINK_TRAINING,      "HW_LKTN"},
+		{LOG_HW_SET_MODE,           "HW_Mode"},
+		{LOG_HW_RESUME_S3,          "HW_Resume"},
+		{LOG_HW_AUDIO,              "HW_Audio"},
+		{LOG_HW_HPD_IRQ,            "HW_HPDIRQ"},
+		{LOG_MST,                   "MST"},
+		{LOG_SCALER,                "Scaler"},
+		{LOG_BIOS,                  "BIOS"},
+		{LOG_BANDWIDTH_CALCS,       "BWCalcs"},
+		{LOG_BANDWIDTH_VALIDATION,  "BWValidation"},
+		{LOG_I2C_AUX,               "I2C_AUX"},
+		{LOG_SYNC,                  "Sync"},
+		{LOG_BACKLIGHT,             "Backlight"},
+		{LOG_FEATURE_OVERRIDE,      "Override"},
+		{LOG_DETECTION_EDID_PARSER, "Edid"},
+		{LOG_DETECTION_DP_CAPS,     "DP_Caps"},
+		{LOG_RESOURCE,              "Resource"},
+		{LOG_DML,                   "DML"},
+		{LOG_EVENT_MODE_SET,        "Mode"},
+		{LOG_EVENT_DETECTION,       "Detect"},
+		{LOG_EVENT_LINK_TRAINING,   "LKTN"},
+		{LOG_EVENT_LINK_LOSS,       "LinkLoss"},
+		{LOG_EVENT_UNDERFLOW,       "Underflow"},
+		{LOG_IF_TRACE,				"InterfaceTrace"}
+};
+
+
+#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
+		(1 << LOG_WARNING) | \
+		(1 << LOG_EVENT_MODE_SET) | \
+		(1 << LOG_EVENT_DETECTION) | \
+		(1 << LOG_EVENT_LINK_TRAINING) | \
+		(1 << LOG_EVENT_LINK_LOSS) | \
+		(1 << LOG_EVENT_UNDERFLOW) | \
+		(1 << LOG_RESOURCE) | \
+		(1 << LOG_FEATURE_OVERRIDE) | \
+		(1 << LOG_DETECTION_EDID_PARSER) | \
+		(1 << LOG_DC) | \
+		(1 << LOG_HW_HOTPLUG) | \
+		(1 << LOG_HW_SET_MODE) | \
+		(1 << LOG_HW_RESUME_S3) | \
+		(1 << LOG_HW_HPD_IRQ) | \
+		(1 << LOG_SYNC) | \
+		(1 << LOG_BANDWIDTH_VALIDATION) | \
+		(1 << LOG_MST) | \
+		(1 << LOG_BIOS) | \
+		(1 << LOG_DETECTION_DP_CAPS) | \
+		(1 << LOG_BACKLIGHT) | \
+		(1 << LOG_I2C_AUX) | \
+		(1 << LOG_IF_TRACE)) /* | \
+		(1 << LOG_SURFACE) | \
+		(1 << LOG_SCALER) | \
+		(1 << LOG_DML) | \
+		(1 << LOG_HW_LINK_TRAINING) | \
+		(1 << LOG_HW_AUDIO) | \
+		(1 << LOG_BANDWIDTH_CALCS) */
+
+/* ----------- Object init and destruction ----------- */
+static bool construct(struct dc_context *ctx, struct dal_logger *logger)
+{
+	/* malloc buffer and init offsets */
+	logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
+	logger->log_buffer = (char *)dm_alloc(logger->log_buffer_size *
+		sizeof(char));
+
+	if (!logger->log_buffer)
+		return false;
+
+	/* Initialize both offsets to start of buffer (empty) */
+	logger->buffer_read_offset = 0;
+	logger->buffer_write_offset = 0;
+
+	logger->write_wrap_count = 0;
+	logger->read_wrap_count = 0;
+	logger->open_count = 0;
+
+	logger->flags.bits.ENABLE_CONSOLE = 1;
+	logger->flags.bits.ENABLE_BUFFER = 0;
+
+	logger->ctx = ctx;
+
+	logger->mask = DC_DEFAULT_LOG_MASK;
+
+	return true;
+}
+
+static void destruct(struct dal_logger *logger)
+{
+	if (logger->log_buffer) {
+		dm_free(logger->log_buffer);
+		logger->log_buffer = NULL;
+	}
+}
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx)
+{
+	/* malloc struct */
+	struct dal_logger *logger = dm_alloc(sizeof(struct dal_logger));
+
+	if (!logger)
+		return NULL;
+	if (!construct(ctx, logger)) {
+		dm_free(logger);
+		return NULL;
+	}
+
+	return logger;
+}
+
+uint32_t dal_logger_destroy(struct dal_logger **logger)
+{
+	if (logger == NULL || *logger == NULL)
+		return 1;
+	destruct(*logger);
+	dm_free(*logger);
+	*logger = NULL;
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+static bool dal_logger_should_log(
+	struct dal_logger *logger,
+	enum dc_log_type log_type)
+{
+	if (logger->mask & (1 << log_type))
+		return true;
+
+	return false;
+}
+
+static void log_to_debug_console(struct log_entry *entry)
+{
+	struct dal_logger *logger = entry->logger;
+
+	if (logger->flags.bits.ENABLE_CONSOLE == 0)
+		return;
+
+	if (entry->buf_offset) {
+		switch (entry->type) {
+		case LOG_ERROR:
+			dm_error("%s", entry->buf);
+			break;
+		default:
+			dm_output_to_console("%s", entry->buf);
+			break;
+		}
+	}
+}
+
+/* Print everything unread existing in log_buffer to debug console*/
+static void flush_to_debug_console(struct dal_logger *logger)
+{
+	int i = logger->buffer_read_offset;
+	char *string_start = &logger->log_buffer[i];
+
+	dm_output_to_console(
+		"---------------- FLUSHING LOG BUFFER ----------------\n");
+	while (i < logger->buffer_write_offset) {
+
+		if (logger->log_buffer[i] == '\0') {
+			dm_output_to_console("%s", string_start);
+			string_start = (char *)logger->log_buffer + i + 1;
+		}
+		i++;
+	}
+	dm_output_to_console(
+		"-------------- END FLUSHING LOG BUFFER --------------\n\n");
+}
+
+static void log_to_internal_buffer(struct log_entry *entry)
+{
+
+	uint32_t size = entry->buf_offset;
+	struct dal_logger *logger = entry->logger;
+
+	if (logger->flags.bits.ENABLE_BUFFER == 0)
+		return;
+
+	if (logger->log_buffer == NULL)
+		return;
+
+	if (size > 0 && size < logger->log_buffer_size) {
+
+		int total_free_space = 0;
+		int space_before_wrap = 0;
+
+		if (logger->buffer_write_offset > logger->buffer_read_offset) {
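+			/* writer is ahead of reader: the free region wraps
+			 * around the end of the buffer */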
+			total_free_space = logger->log_buffer_size -
+					logger->buffer_write_offset +
+					logger->buffer_read_offset;
+			space_before_wrap = logger->log_buffer_size -
+					logger->buffer_write_offset;
+		} else if (logger->buffer_write_offset <
+				logger->buffer_read_offset) {
+			total_free_space = logger->log_buffer_size -
+					logger->buffer_read_offset +
+					logger->buffer_write_offset;
+			space_before_wrap = total_free_space;
+		} else if (logger->write_wrap_count !=
+				logger->read_wrap_count) {
+			/* Buffer is completely full already */
+			total_free_space = 0;
+			space_before_wrap = 0;
+		} else {
+			/* Buffer is empty, start writing at beginning */
+			total_free_space = logger->log_buffer_size;
+			space_before_wrap = logger->log_buffer_size;
+			logger->buffer_write_offset = 0;
+			logger->buffer_read_offset = 0;
+		}
+
+		if (space_before_wrap > size) {
+			/* No wrap around, copy 'size' bytes
+			 * from 'entry->buf' to 'log_buffer'
+			 */
+			memmove(logger->log_buffer +
+					logger->buffer_write_offset,
+					entry->buf, size);
+			logger->buffer_write_offset += size;
+
+		} else if (total_free_space > size) {
+			/* We have enough room without flushing,
+			 * but need to wrap around */
+
+			/* only size - space_before_wrap bytes remain to be
+			 * copied past the wrap point */
+			int bytes_after_wrap = size - space_before_wrap;
+
+			memmove(logger->log_buffer +
+					logger->buffer_write_offset,
+					entry->buf, space_before_wrap);
+			memmove(logger->log_buffer, entry->buf +
+					space_before_wrap, bytes_after_wrap);
+
+			logger->buffer_write_offset = bytes_after_wrap;
+			logger->write_wrap_count++;
+
+		} else {
+			/* Not enough room remaining, we should flush
+			 * existing logs */
+
+			/* Flush existing unread logs to console */
+			flush_to_debug_console(logger);
+
+			/* Start writing to beginning of buffer */
+			memmove(logger->log_buffer, entry->buf, size);
+			logger->buffer_write_offset = size;
+			logger->buffer_read_offset = 0;
+		}
+
+	}
+}
+
+static void log_heading(struct log_entry *entry)
+{
+	int j;
+
+	for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
+		const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
+		if (info->type == entry->type)
+			dm_logger_append(entry, "[%s]\t", info->name);
+	}
+}
+
+static void append_entry(
+		struct log_entry *entry,
+		char *buffer,
+		uint32_t buf_size)
+{
+	if (!entry->buf ||
+	    entry->buf_offset + buf_size > entry->max_buf_bytes) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	/* Todo: check if off by 1 byte due to \0 anywhere */
+	memmove(entry->buf + entry->buf_offset, buffer, buf_size);
+	entry->buf_offset += buf_size;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Warning: Be careful that 'msg' is null terminated and the total size is
+ * less than LOG_MAX_LINE_SIZE (896) including '\0'
+ */
+void dm_logger_write(
+	struct dal_logger *logger,
+	enum dc_log_type log_type,
+	const char *msg,
+	...)
+{
+	if (logger && dal_logger_should_log(logger, log_type)) {
+		uint32_t size;
+		va_list args;
+		char buffer[LOG_MAX_LINE_SIZE];
+		struct log_entry entry;
+
+		va_start(args, msg);
+
+		entry.logger = logger;
+
+		entry.buf = buffer;
+
+		entry.buf_offset = 0;
+		/* must match the stack 'buffer' that entry.buf points at */
+		entry.max_buf_bytes = LOG_MAX_LINE_SIZE * sizeof(char);
+
+		entry.type = log_type;
+
+		log_heading(&entry);
+
+		size = dm_log_to_buffer(
+			buffer, LOG_MAX_LINE_SIZE, msg, args);
+
+		entry.buf_offset += size;
+
+		/* --Flush log_entry buffer-- */
+		/* print to kernel console */
+		log_to_debug_console(&entry);
+		/* log internally for dsat */
+		log_to_internal_buffer(&entry);
+
+		va_end(args);
+	}
+}
+
+/* Same as dm_logger_write, except without open() and close(), which must
+ * be done separately.
+ */
+void dm_logger_append(
+	struct log_entry *entry,
+	const char *msg,
+	...)
+{
+	struct dal_logger *logger;
+
+	if (!entry) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	logger = entry->logger;
+
+	if (logger && logger->open_count > 0 &&
+		dal_logger_should_log(logger, entry->type)) {
+
+		uint32_t size;
+		va_list args;
+		char buffer[LOG_MAX_LINE_SIZE];
+
+		va_start(args, msg);
+
+		size = dm_log_to_buffer(
+			buffer, LOG_MAX_LINE_SIZE, msg, args);
+
+		if (size < LOG_MAX_LINE_SIZE - 1)
+			append_entry(entry, buffer, size);
+		else /* 26 bytes: the message plus its terminating '\0' */
+			append_entry(entry, "LOG_ERROR, line too long\n", 26);
+
+		va_end(args);
+	}
+}
+
+void dm_logger_open(
+		struct dal_logger *logger,
+		struct log_entry *entry, /* out */
+		enum dc_log_type log_type)
+{
+	if (!entry) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	entry->type = log_type;
+	entry->logger = logger;
+
+	entry->buf = dm_alloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char));
+
+	entry->buf_offset = 0;
+	entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+	logger->open_count++;
+
+	log_heading(entry);
+}
+
+void dm_logger_close(struct log_entry *entry)
+{
+	struct dal_logger *logger = entry->logger;
+
+	if (logger && logger->open_count > 0) {
+		logger->open_count--;
+	} else {
+		BREAK_TO_DEBUGGER();
+		goto cleanup;
+	}
+
+	/* --Flush log_entry buffer-- */
+	/* print to kernel console */
+	log_to_debug_console(entry);
+	/* log internally for dsat */
+	log_to_internal_buffer(entry);
+
+	/* TODO: Write end heading */
+
+cleanup:
+	if (entry->buf) {
+		dm_free(entry->buf);
+		entry->buf = NULL;
+		entry->buf_offset = 0;
+		entry->max_buf_bytes = 0;
+	}
+}

+ 67 - 0
drivers/gpu/drm/amd/display/dc/basics/logger.h

@@ -0,0 +1,67 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_H__
+#define __DAL_LOGGER_H__
+
+/* Structure for keeping track of offsets, buffer, etc */
+
+#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
+
+/* The connectivity log needs to output an EDID, which requires at least
+ * 256 x 3 bytes, so the log line size is raised to 896 to accommodate it.
+ */
+#define LOG_MAX_LINE_SIZE 896
+
+#include "include/logger_types.h"
+
+struct dal_logger {
+
+	/* How far into the circular buffer has been read by dsat
+	 * Read offset should never cross write offset. Write \0's to
+	 * read data just to be sure?
+	 */
+	uint32_t buffer_read_offset;
+
+	/* How far into the circular buffer we have written
+	 * Write offset should never cross read offset
+	 */
+	uint32_t buffer_write_offset;
+
+	uint32_t write_wrap_count;
+	uint32_t read_wrap_count;
+
+	uint32_t open_count;
+
+	char *log_buffer;	/* Pointer to malloc'ed buffer */
+	uint32_t log_buffer_size; /* Size of circular buffer */
+
+	uint32_t mask; /*array of masks for major elements*/
+
+	union logger_flags flags;
+	struct dc_context *ctx;
+};
+
+#endif /* __DAL_LOGGER_H__ */

+ 197 - 0
drivers/gpu/drm/amd/display/dc/basics/register_logger.c

@@ -0,0 +1,197 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/dal_types.h"
+#include "include/logger_interface.h"
+#include "logger.h"
+
+/******************************************************************************
+ * Register Logger.
+ * A facility to create register R/W logs.
+ * Currently used for DAL Test.
+ *****************************************************************************/
+
+/******************************************************************************
+ * Private structures
+ *****************************************************************************/
+struct dal_reg_dump_stack_location {
+	const char *current_caller_func;
+	long current_pid;
+	long current_tgid;
+	uint32_t rw_count;/* register access counter for current function. */
+};
+
+/* This is the maximum number of nested calls to the 'reg_dump' facility. */
+#define DAL_REG_DUMP_STACK_MAX_SIZE 32
+
+struct dal_reg_dump_stack {
+	int32_t stack_pointer;
+	struct dal_reg_dump_stack_location
+		stack_locations[DAL_REG_DUMP_STACK_MAX_SIZE];
+	uint32_t total_rw_count; /* Total count for *all* functions. */
+};
+
+static struct dal_reg_dump_stack reg_dump_stack = {0};
+
+/******************************************************************************
+ * Private functions
+ *****************************************************************************/
+
+/* Check if the current process is the one which requested the register dump.
+ * The reason for the check:
+ * mmCRTC_STATUS_FRAME_COUNT is accessed by dal_controller_get_vblank_counter(),
+ * which runs all the time while at least one display is connected
+ * (triggered by drm_mode_page_flip_ioctl()). */
+static bool is_reg_dump_process(void)
+{
+	uint32_t i;
+
+	/* walk the list of our processes */
+	for (i = 0; i < reg_dump_stack.stack_pointer; i++) {
+		struct dal_reg_dump_stack_location *stack_location
+					= &reg_dump_stack.stack_locations[i];
+
+		if (stack_location->current_pid == dm_get_pid()
+			&& stack_location->current_tgid == dm_get_tgid())
+			return true;
+	}
+
+	return false;
+}
+
+static bool dal_reg_dump_stack_is_empty(void)
+{
+	return reg_dump_stack.stack_pointer <= 0;
+}
+
+static struct dal_reg_dump_stack_location *dal_reg_dump_stack_push(void)
+{
+	struct dal_reg_dump_stack_location *current_location = NULL;
+
+	if (reg_dump_stack.stack_pointer >= DAL_REG_DUMP_STACK_MAX_SIZE) {
+		/* stack is full */
+		dm_output_to_console("[REG_DUMP]: %s: stack is full!\n",
+				__func__);
+	} else {
+		current_location =
+		&reg_dump_stack.stack_locations[reg_dump_stack.stack_pointer];
+		++reg_dump_stack.stack_pointer;
+	}
+
+	return current_location;
+}
+
+static struct dal_reg_dump_stack_location *dal_reg_dump_stack_pop(void)
+{
+	struct dal_reg_dump_stack_location *current_location = NULL;
+
+	if (dal_reg_dump_stack_is_empty()) {
+		/* stack is empty */
+		dm_output_to_console("[REG_DUMP]: %s: stack is empty!\n",
+				__func__);
+	} else {
+		--reg_dump_stack.stack_pointer;
+		current_location =
+		&reg_dump_stack.stack_locations[reg_dump_stack.stack_pointer];
+	}
+
+	return current_location;
+}
+
+/******************************************************************************
+ * Public functions
+ *****************************************************************************/
+
+void dal_reg_logger_push(const char *caller_func)
+{
+	struct dal_reg_dump_stack_location *free_stack_location;
+
+	free_stack_location = dal_reg_dump_stack_push();
+
+	if (NULL == free_stack_location)
+		return;
+
+	memset(free_stack_location, 0, sizeof(*free_stack_location));
+
+	free_stack_location->current_caller_func = caller_func;
+	free_stack_location->current_pid = dm_get_pid();
+	free_stack_location->current_tgid = dm_get_tgid();
+
+	dm_output_to_console("[REG_DUMP]:%s - start (pid:%ld, tgid:%ld)\n",
+		caller_func,
+		free_stack_location->current_pid,
+		free_stack_location->current_tgid);
+}
+
+void dal_reg_logger_pop(void)
+{
+	struct dal_reg_dump_stack_location *top_stack_location;
+
+	top_stack_location = dal_reg_dump_stack_pop();
+
+	if (NULL == top_stack_location) {
+		dm_output_to_console("[REG_DUMP]:%s - Stack is Empty!\n",
+				__func__);
+		return;
+	}
+
+	dm_output_to_console(
+	"[REG_DUMP]:%s - end."\
+	" Reg R/W Count: Total=%d Function=%d. (pid:%ld, tgid:%ld)\n",
+			top_stack_location->current_caller_func,
+			reg_dump_stack.total_rw_count,
+			top_stack_location->rw_count,
+			dm_get_pid(),
+			dm_get_tgid());
+
+	memset(top_stack_location, 0, sizeof(*top_stack_location));
+}
+
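+/* Note: assumes a frame was pushed first, i.e. stack_pointer >= 1. */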
+void dal_reg_logger_rw_count_increment(void)
+{
+	++reg_dump_stack.total_rw_count;
+
+	++reg_dump_stack.stack_locations
+		[reg_dump_stack.stack_pointer - 1].rw_count;
+}
+
+bool dal_reg_logger_should_dump_register(void)
+{
+	if (dal_reg_dump_stack_is_empty())
+		return false;
+
+	if (!is_reg_dump_process())
+		return false;
+
+	return true;
+}
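+
+/* Illustrative usage, not part of this patch: a caller brackets a register
+ * programming sequence so that accesses inside it are counted and attributed.
+ * 'program_sequence_example' is a hypothetical function name.
+ *
+ *	static void program_sequence_example(void)
+ *	{
+ *		dal_reg_logger_push(__func__);
+ *
+ *		// register reads/writes here; each access is expected to
+ *		// call dal_reg_logger_rw_count_increment()
+ *
+ *		dal_reg_logger_pop();
+ *	}
+ */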
+
+/******************************************************************************
+ * End of File.
+ *****************************************************************************/

+ 116 - 0
drivers/gpu/drm/amd/display/dc/basics/signal_types.c

@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/signal_types.h"
+
+bool dc_is_hdmi_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
+bool dc_is_dp_sst_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		signal == SIGNAL_TYPE_EDP);
+}
+
+bool dc_is_dp_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		signal == SIGNAL_TYPE_EDP ||
+		signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+}
+
+bool dc_is_dp_external_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+}
+
+bool dc_is_analog_signal(enum signal_type signal)
+{
+	switch (signal) {
+	case SIGNAL_TYPE_RGB:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool dc_is_embedded_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
+}
+
+bool dc_is_dvi_signal(enum signal_type signal)
+{
+	switch (signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool dc_is_dvi_single_link_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
+}
+
+bool dc_is_dual_link_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_DVI_DUAL_LINK);
+}
+
+bool dc_is_audio_capable_signal(enum signal_type signal)
+{
+	return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+		dc_is_hdmi_signal(signal) ||
+		signal == SIGNAL_TYPE_WIRELESS);
+}
+
+/*
+ * @brief
+ * Returns whether the signal is compatible
+ * with other digital encoder signal types.
+ * This is true for DVI, LVDS, and HDMI signal types.
+ */
+bool dc_is_digital_encoder_compatible_signal(enum signal_type signal)
+{
+	switch (signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	case SIGNAL_TYPE_LVDS:
+		return true;
+	default:
+		return false;
+	}
+}
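+
+/* Hypothetical usage sketch, not part of this patch: gate audio programming
+ * on the predicates above.
+ *
+ *	static void enable_audio_if_supported(enum signal_type signal)
+ *	{
+ *		if (!dc_is_audio_capable_signal(signal))
+ *			return;
+ *		// DP (SST or MST), HDMI and wireless sinks carry audio;
+ *		// program the audio endpoint here
+ *	}
+ */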

+ 307 - 0
drivers/gpu/drm/amd/display/dc/basics/vector.c

@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/vector.h"
+
+bool dal_vector_construct(
+	struct vector *vector,
+	struct dc_context *ctx,
+	uint32_t capacity,
+	uint32_t struct_size)
+{
+	vector->container = NULL;
+
+	if (!struct_size || !capacity) {
+		/* Container must be non-zero size */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	vector->container = dm_alloc(struct_size * capacity);
+	if (vector->container == NULL)
+		return false;
+	vector->capacity = capacity;
+	vector->struct_size = struct_size;
+	vector->count = 0;
+	vector->ctx = ctx;
+	return true;
+}
+
+bool dal_vector_presized_costruct(
+	struct vector *vector,
+	struct dc_context *ctx,
+	uint32_t count,
+	void *initial_value,
+	uint32_t struct_size)
+{
+	uint32_t i;
+
+	vector->container = NULL;
+
+	if (!struct_size || !count) {
+		/* Container must be non-zero size */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	vector->container = dm_alloc(struct_size * count);
+
+	if (vector->container == NULL)
+		return false;
+
+	/* If the caller didn't supply an initial value then the default
+	 * of all zeros is expected, which is exactly what dm_alloc()
+	 * initialises the memory to. */
+	if (NULL != initial_value) {
+		for (i = 0; i < count; ++i)
+			memmove(
+				vector->container + i * struct_size,
+				initial_value,
+				struct_size);
+	}
+
+	vector->capacity = count;
+	vector->struct_size = struct_size;
+	vector->count = count;
+	return true;
+}
+
+struct vector *dal_vector_presized_create(
+	struct dc_context *ctx,
+	uint32_t size,
+	void *initial_value,
+	uint32_t struct_size)
+{
+	struct vector *vector = dm_alloc(sizeof(struct vector));
+
+	if (vector == NULL)
+		return NULL;
+
+	if (dal_vector_presized_costruct(
+		vector, ctx, size, initial_value, struct_size))
+		return vector;
+
+	BREAK_TO_DEBUGGER();
+	dm_free(vector);
+	return NULL;
+}
+
+struct vector *dal_vector_create(
+	struct dc_context *ctx,
+	uint32_t capacity,
+	uint32_t struct_size)
+{
+	struct vector *vector = dm_alloc(sizeof(struct vector));
+
+	if (vector == NULL)
+		return NULL;
+
+	if (dal_vector_construct(vector, ctx, capacity, struct_size))
+		return vector;
+
+	BREAK_TO_DEBUGGER();
+	dm_free(vector);
+	return NULL;
+}
+
+void dal_vector_destruct(
+	struct vector *vector)
+{
+	if (vector->container != NULL)
+		dm_free(vector->container);
+	vector->count = 0;
+	vector->capacity = 0;
+}
+
+void dal_vector_destroy(
+	struct vector **vector)
+{
+	if (vector == NULL || *vector == NULL)
+		return;
+	dal_vector_destruct(*vector);
+	dm_free(*vector);
+	*vector = NULL;
+}
+
+uint32_t dal_vector_get_count(
+	const struct vector *vector)
+{
+	return vector->count;
+}
+
+void *dal_vector_at_index(
+	const struct vector *vector,
+	uint32_t index)
+{
+	if (vector->container == NULL || index >= vector->count)
+		return NULL;
+	return vector->container + (index * vector->struct_size);
+}
+
+bool dal_vector_remove_at_index(
+	struct vector *vector,
+	uint32_t index)
+{
+	if (index >= vector->count)
+		return false;
+
+	if (index != vector->count - 1)
+		memmove(
+			vector->container + (index * vector->struct_size),
+			vector->container + ((index + 1) * vector->struct_size),
+			(vector->count - index - 1) * vector->struct_size);
+	vector->count -= 1;
+
+	return true;
+}
+
+void dal_vector_set_at_index(
+	const struct vector *vector,
+	const void *what,
+	uint32_t index)
+{
+	void *where = dal_vector_at_index(vector, index);
+
+	if (!where) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+	memmove(
+		where,
+		what,
+		vector->struct_size);
+}
+
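+/* Double the capacity so that repeated appends cost amortised O(1). */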
+static inline uint32_t calc_increased_capacity(
+	uint32_t old_capacity)
+{
+	return old_capacity * 2;
+}
+
+bool dal_vector_insert_at(
+	struct vector *vector,
+	const void *what,
+	uint32_t position)
+{
+	uint8_t *insert_address;
+
+	if (vector->count == vector->capacity) {
+		if (!dal_vector_reserve(
+			vector,
+			calc_increased_capacity(vector->capacity)))
+			return false;
+	}
+
+	insert_address = vector->container + (vector->struct_size * position);
+
+	if (vector->count && position < vector->count)
+		memmove(
+			insert_address + vector->struct_size,
+			insert_address,
+			vector->struct_size * (vector->count - position));
+
+	memmove(
+		insert_address,
+		what,
+		vector->struct_size);
+
+	vector->count++;
+
+	return true;
+}
+
+bool dal_vector_append(
+	struct vector *vector,
+	const void *item)
+{
+	return dal_vector_insert_at(vector, item, vector->count);
+}
+
+struct vector *dal_vector_clone(
+	const struct vector *vector)
+{
+	struct vector *vec_cloned;
+	uint32_t count;
+
+	/* create new vector */
+	count = dal_vector_get_count(vector);
+
+	if (count == 0)
+		/* when count is 0 we still want to create a clone
+		 * of the vector */
+		vec_cloned = dal_vector_create(
+			vector->ctx,
+			vector->capacity,
+			vector->struct_size);
+	else
+		/* Call "presized create" version, independently of how the
+		 * original vector was created.
+		 * The owner of the original vector must know how to treat the
+		 * new vector - as "presized" or as "regular".
+		 * But from the vector's point of view it doesn't matter. */
+		vec_cloned = dal_vector_presized_create(vector->ctx, count,
+			NULL,/* no initial value */
+			vector->struct_size);
+
+	if (NULL == vec_cloned) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	/* copy vector's data */
+	memmove(vec_cloned->container, vector->container,
+			vec_cloned->struct_size * vec_cloned->capacity);
+
+	return vec_cloned;
+}
+
+uint32_t dal_vector_capacity(const struct vector *vector)
+{
+	return vector->capacity;
+}
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity)
+{
+	void *new_container;
+
+	if (capacity <= vector->capacity)
+		return true;
+
+	new_container = dm_realloc(vector->container, capacity * vector->struct_size);
+
+	if (new_container) {
+		vector->container = new_container;
+		vector->capacity = capacity;
+		return true;
+	}
+
+	return false;
+}
+
+void dal_vector_clear(struct vector *vector)
+{
+	vector->count = 0;
+}
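+
+/* Illustrative usage of the vector API, not part of this patch. 'ctx' is
+ * assumed to be a valid struct dc_context owned by the caller.
+ *
+ *	static void vector_usage_example(struct dc_context *ctx)
+ *	{
+ *		uint32_t i;
+ *		uint32_t value = 42;
+ *		struct vector *vec =
+ *			dal_vector_create(ctx, 4, sizeof(uint32_t));
+ *
+ *		if (!vec)
+ *			return;
+ *
+ *		dal_vector_append(vec, &value); // grows when capacity is hit
+ *
+ *		for (i = 0; i < dal_vector_get_count(vec); i++) {
+ *			uint32_t *item = dal_vector_at_index(vec, i);
+ *			// use *item
+ *		}
+ *
+ *		dal_vector_destroy(&vec); // frees and NULLs the pointer
+ *	}
+ */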

+ 24 - 0
drivers/gpu/drm/amd/display/dc/bios/Makefile

@@ -0,0 +1,24 @@
+#
+# Makefile for the 'bios' sub-component of DAL.
+# It provides parsing and execution controls for the ATOM BIOS image.
+
+BIOS = bios_parser.o bios_parser_interface.o  bios_parser_helper.o command_table.o command_table_helper.o
+
+AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
+# DCE8.x is compiled.
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
+
+###############################################################################
+# DCE 11x
+###############################################################################
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
+
+ccflags-y += -DLATEST_ATOM_BIOS_SUPPORT
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o

+ 4220 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c

@@ -0,0 +1,4220 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "dc_bios_types.h"
+#include "include/gpio_service_interface.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "bios_parser.h"
+#include "bios_parser_types_internal.h"
+#include "bios_parser_interface.h"
+
+/* TODO remove - only needed for default i2c speed */
+#include "dc.h"
+
+#define THREE_PERCENT_OF_10000 300
+
+#define LAST_RECORD_TYPE 0xff
+
+/* GUID to validate external display connection info table (aka OPM module) */
+static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
+	0x91, 0x6E, 0x57, 0x09,
+	0x3F, 0x6D, 0xD2, 0x11,
+	0x39, 0x8E, 0x00, 0xA0,
+	0xC9, 0x69, 0x72, 0x3B};
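+
+/* Sketch of the intended use of the GUID above, not part of this patch:
+ * validate the external display connection info table by comparing its
+ * embedded GUID against the constant. 'tbl_guid' is illustrative; the real
+ * field name comes from the ATOM headers.
+ *
+ *	if (memcmp(tbl_guid, ext_display_connection_guid,
+ *			NUMBER_OF_UCHAR_FOR_GUID) != 0)
+ *		return BP_RESULT_BADBIOSTABLE;
+ */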
+
+#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
+
+static enum object_type object_type_from_bios_object_id(
+	uint32_t bios_object_id);
+static struct graphics_object_id object_id_from_bios_object_id(
+	uint32_t bios_object_id);
+static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id);
+static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id);
+static enum connector_id connector_id_from_bios_object_id(
+	uint32_t bios_object_id);
+static uint32_t id_from_bios_object_id(enum object_type type,
+	uint32_t bios_object_id);
+static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id);
+static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id);
+static void get_atom_data_table_revision(
+	ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+	struct atom_data_revision *tbl_revision);
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+	ATOM_OBJECT *object);
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+	uint16_t **id_list);
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+	ATOM_OBJECT *object, uint16_t **id_list);
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+	struct graphics_object_id id);
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+	ATOM_I2C_RECORD *record,
+	struct graphics_object_i2c_info *info);
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+	ATOM_OBJECT *object);
+static struct device_id device_type_from_device_id(uint16_t device_id);
+static uint32_t signal_to_ss_id(enum as_signal_type signal);
+static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static ATOM_ENCODER_CAP_RECORD *get_encoder_cap_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+/*****************************************************************************/
+static bool bios_parser_construct(
+	struct bios_parser *bp,
+	struct bp_init_data *init,
+	enum dce_version dce_version);
+
+static uint8_t bios_parser_get_connectors_number(
+	struct dc_bios *dcb);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+	struct dc_bios *dcb,
+	struct embedded_panel_info *info);
+
+/*****************************************************************************/
+
+struct dc_bios *bios_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	struct bios_parser *bp = NULL;
+
+	bp = dm_alloc(sizeof(struct bios_parser));
+	if (!bp)
+		return NULL;
+
+	if (bios_parser_construct(bp, init, dce_version))
+		return &bp->base;
+
+	dm_free(bp);
+	BREAK_TO_DEBUGGER();
+	return NULL;
+}
+
+static void destruct(struct bios_parser *bp)
+{
+	if (bp->base.bios_local_image)
+		dm_free(bp->base.bios_local_image);
+
+	if (bp->base.integrated_info)
+		dm_free(bp->base.integrated_info);
+}
+
+static void bios_parser_destroy(struct dc_bios **dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+	if (!bp) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	destruct(bp);
+
+	dm_free(bp);
+	*dcb = NULL;
+}
+
+static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
+{
+	ATOM_OBJECT_TABLE *table;
+
+	uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
+
+	table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
+
+	if (!table)
+		return 0;
+	else
+		return table->ucNumberOfObjects;
+}
+
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	return get_number_of_objects(bp,
+		le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+	struct dc_bios *dcb,
+	uint32_t i)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct graphics_object_id object_id = dal_graphics_object_id_init(
+		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+	uint32_t encoder_table_offset = bp->object_info_tbl_offset
+		+ le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+
+	ATOM_OBJECT_TABLE *tbl =
+		GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+	if (tbl && tbl->ucNumberOfObjects > i) {
+		const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+		object_id = object_id_from_bios_object_id(id);
+	}
+
+	return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+	struct dc_bios *dcb,
+	uint8_t i)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct graphics_object_id object_id = dal_graphics_object_id_init(
+		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+	uint32_t connector_table_offset = bp->object_info_tbl_offset
+		+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+	ATOM_OBJECT_TABLE *tbl =
+		GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
+
+	if (tbl && tbl->ucNumberOfObjects > i) {
+		const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+		object_id = object_id_from_bios_object_id(id);
+	}
+
+	return object_id;
+}
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+	struct graphics_object_id id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object = get_bios_object(bp, id);
+
+	return get_dst_number_from_object(bp, object);
+}
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+	struct graphics_object_id object_id, uint32_t index,
+	struct graphics_object_id *src_object_id)
+{
+	uint32_t number;
+	uint16_t *id;
+	ATOM_OBJECT *object;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!src_object_id)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return BP_RESULT_BADINPUT;
+	}
+
+	number = get_src_obj_list(bp, object, &id);
+
+	if (number <= index)
+		return BP_RESULT_BADINPUT;
+
+	*src_object_id = object_id_from_bios_object_id(id[index]);
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+	struct graphics_object_id object_id, uint32_t index,
+	struct graphics_object_id *dest_object_id)
+{
+	uint32_t number;
+	uint16_t *id;
+	ATOM_OBJECT *object;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!dest_object_id)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	number = get_dest_obj_list(bp, object, &id);
+
+	if (number <= index)
+		return BP_RESULT_BADINPUT;
+
+	*dest_object_id = object_id_from_bios_object_id(id[index]);
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct graphics_object_i2c_info *info)
+{
+	uint32_t offset;
+	ATOM_OBJECT *object;
+	ATOM_COMMON_RECORD_HEADER *header;
+	ATOM_I2C_RECORD *record;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+			!header->ucRecordSize)
+			break;
+
+		if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+			&& sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
+			/* get the I2C info */
+			record = (ATOM_I2C_RECORD *) header;
+
+			if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
+				return BP_RESULT_OK;
+		}
+
+		offset += header->ucRecordSize;
+	}
+
+	return BP_RESULT_NORECORD;
+}
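+
+/* The loop above is one instance of a pattern repeated throughout this file
+ * (see also get_hpd_record() and get_i2c_record()): walk an object's record
+ * list until LAST_RECORD_TYPE or a zero-sized record. A hypothetical generic
+ * helper, not part of this patch, could look like:
+ *
+ *	static ATOM_COMMON_RECORD_HEADER *find_record(struct bios_parser *bp,
+ *		ATOM_OBJECT *object, uint8_t type, uint8_t min_size)
+ *	{
+ *		uint32_t offset = le16_to_cpu(object->usRecordOffset)
+ *				+ bp->object_info_tbl_offset;
+ *
+ *		for (;;) {
+ *			ATOM_COMMON_RECORD_HEADER *h =
+ *				GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+ *
+ *			if (!h || h->ucRecordType == LAST_RECORD_TYPE ||
+ *				!h->ucRecordSize)
+ *				return NULL;
+ *
+ *			if (h->ucRecordType == type &&
+ *				h->ucRecordSize >= min_size)
+ *				return h;
+ *
+ *			offset += h->ucRecordSize;
+ *		}
+ *	}
+ */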
+
+static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
+	ATOM_COMMON_TABLE_HEADER *header,
+	uint8_t *address)
+{
+	enum bp_result result = BP_RESULT_NORECORD;
+	ATOM_VOLTAGE_OBJECT_INFO *info =
+		(ATOM_VOLTAGE_OBJECT_INFO *) address;
+
+	uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
+
+	while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+		ATOM_VOLTAGE_OBJECT *object =
+			(ATOM_VOLTAGE_OBJECT *) voltage_current_object;
+
+		if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
+			(object->ucVoltageType &
+				VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
+
+			*i2c_line = object->asControl.ucVoltageControlI2cLine
+					^ 0x90;
+			result = BP_RESULT_OK;
+			break;
+		}
+
+		voltage_current_object += object->ucSize;
+	}
+	return result;
+}
+
+static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
+	uint32_t index,
+	ATOM_COMMON_TABLE_HEADER *header,
+	uint8_t *address)
+{
+	enum bp_result result = BP_RESULT_NORECORD;
+	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
+		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
+
+	uint8_t *voltage_current_object =
+		(uint8_t *) (&(info->asVoltageObj[0]));
+
+	while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+		ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
+			(ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
+
+		if (object->sHeader.ucVoltageMode ==
+			ATOM_INIT_VOLTAGE_REGULATOR) {
+			if (object->sHeader.ucVoltageType == index) {
+				*i2c_line = object->ucVoltageControlI2cLine
+						^ 0x90;
+				result = BP_RESULT_OK;
+				break;
+			}
+		}
+
+		voltage_current_object += le16_to_cpu(object->sHeader.usSize);
+	}
+	return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+	struct dc_bios *dcb,
+	uint32_t i2c_channel_id,
+	struct graphics_object_i2c_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_I2C_ID_CONFIG_ACCESS *config;
+	ATOM_I2C_RECORD record;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
+
+	record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
+	record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
+	record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
+
+	return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+	uint32_t index,
+	struct graphics_object_i2c_info *info)
+{
+	uint8_t i2c_line = 0;
+	enum bp_result result = BP_RESULT_NORECORD;
+	uint8_t *voltage_info_address;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision = {0};
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!DATA_TABLES(VoltageObjectInfo))
+		return result;
+
+	voltage_info_address = get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER));
+
+	header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
+
+	get_atom_data_table_revision(header, &revision);
+
+	switch (revision.major) {
+	case 1:
+	case 2:
+		result = get_voltage_ddc_info_v1(&i2c_line, header,
+			voltage_info_address);
+		break;
+	case 3:
+		if (revision.minor != 1)
+			break;
+		result = get_voltage_ddc_info_v3(&i2c_line, index, header,
+			voltage_info_address);
+		break;
+	}
+
+	if (result == BP_RESULT_OK)
+		result = bios_parser_get_thermal_ddc_info(dcb,
+			i2c_line, info);
+
+	return result;
+}
+
+/* TODO: temporarily commented out to suppress 'defined but not used' warning */
+#if 0
+static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
+	struct bios_parser *bp,
+	uint8_t i2c_line, struct graphics_object_i2c_info *info)
+{
+	uint32_t offset;
+	ATOM_OBJECT *object;
+	ATOM_OBJECT_TABLE *table;
+	uint32_t i;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+	offset += bp->object_info_tbl_offset;
+
+	table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+
+	if (!table)
+		return BP_RESULT_BADBIOSTABLE;
+
+	for (i = 0; i < table->ucNumberOfObjects; i++) {
+		object = &table->asObjects[i];
+
+		if (!object) {
+			BREAK_TO_DEBUGGER(); /* Invalid object id */
+			return BP_RESULT_BADINPUT;
+		}
+
+		offset = le16_to_cpu(object->usRecordOffset)
+				+ bp->object_info_tbl_offset;
+
+		for (;;) {
+			ATOM_COMMON_RECORD_HEADER *header =
+				GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+			if (!header)
+				return BP_RESULT_BADBIOSTABLE;
+
+			offset += header->ucRecordSize;
+
+			if (LAST_RECORD_TYPE == header->ucRecordType ||
+				!header->ucRecordSize)
+				break;
+
+			if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+				&& sizeof(ATOM_I2C_RECORD) <=
+				header->ucRecordSize) {
+				ATOM_I2C_RECORD *record =
+					(ATOM_I2C_RECORD *) header;
+
+				if (i2c_line != record->sucI2cId.bfI2C_LineMux)
+					continue;
+
+				/* get the I2C info */
+				if (get_gpio_i2c_info(bp, record, info) ==
+					BP_RESULT_OK)
+					return BP_RESULT_OK;
+			}
+		}
+	}
+
+	return BP_RESULT_NORECORD;
+}
+#endif
+
+static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct graphics_object_hpd_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object;
+	ATOM_HPD_INT_RECORD *record = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	record = get_hpd_record(bp, object);
+
+	if (record != NULL) {
+		info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
+		info->hpd_active = record->ucPlugged_PinState;
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+static uint32_t bios_parser_get_gpio_record(
+	struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct bp_gpio_cntl_info *gpio_record,
+	uint32_t record_size)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_COMMON_RECORD_HEADER *header = NULL;
+	ATOM_OBJECT_GPIO_CNTL_RECORD *record = NULL;
+	ATOM_OBJECT *object = get_bios_object(bp, id);
+	uint32_t offset;
+	uint32_t pins_number;
+	uint32_t i;
+
+	if (!object)
+		return 0;
+
+	/* Initialise offset */
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		/* Get record header */
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+		if (!header || header->ucRecordType == LAST_RECORD_TYPE ||
+			!header->ucRecordSize)
+			break;
+
+		/* If this is gpio control record - stop. We found the record */
+		if (header->ucRecordType == ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE
+			&& header->ucRecordSize
+				>= sizeof(ATOM_OBJECT_GPIO_CNTL_RECORD)) {
+			record = (ATOM_OBJECT_GPIO_CNTL_RECORD *) header;
+			break;
+		}
+
+		/* Advance to next record */
+		offset += header->ucRecordSize;
+	}
+
+	/* If we did not find a record - return */
+	if (!record)
+		return 0;
+
+	/* Extract gpio IDs from bios record (make sure we do not exceed passed
+	 *  array size) */
+	pins_number = (record->ucNumberOfPins < record_size ?
+			record->ucNumberOfPins : record_size);
+	for (i = 0; i < pins_number; i++) {
+		uint8_t output_state = ((record->asGpio[i].ucGPIO_PinState
+			& GPIO_PIN_OUTPUT_STATE_MASK)
+			>> GPIO_PIN_OUTPUT_STATE_SHIFT);
+		gpio_record[i].id = record->asGpio[i].ucGPIOID;
+
+		switch (output_state) {
+		case GPIO_PIN_STATE_ACTIVE_LOW:
+			gpio_record[i].state =
+				GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW;
+			break;
+
+		case GPIO_PIN_STATE_ACTIVE_HIGH:
+			gpio_record[i].state =
+				GPIO_PIN_OUTPUT_STATE_ACTIVE_HIGH;
+			break;
+
+		default:
+			BREAK_TO_DEBUGGER(); /* Invalid Pin Output State */
+			break;
+		}
+	}
+
+	return pins_number;
+}
+
+enum bp_result bios_parser_get_device_tag_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object,
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
+{
+	ATOM_COMMON_RECORD_HEADER *header;
+	uint32_t offset;
+
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		offset += header->ucRecordSize;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+			!header->ucRecordSize)
+			break;
+
+		if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
+			header->ucRecordType)
+			continue;
+
+		if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
+			continue;
+
+		*record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+	struct dc_bios *dcb,
+	struct graphics_object_id connector_object_id,
+	uint32_t device_tag_index,
+	struct connector_device_tag_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object;
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
+	ATOM_CONNECTOR_DEVICE_TAG *device_tag;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	/* getBiosObject will return MXM object */
+	object = get_bios_object(bp, connector_object_id);
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return BP_RESULT_BADINPUT;
+	}
+
+	if (bios_parser_get_device_tag_record(bp, object, &record)
+		!= BP_RESULT_OK)
+		return BP_RESULT_NORECORD;
+
+	if (device_tag_index >= record->ucNumberOfDevice)
+		return BP_RESULT_NORECORD;
+
+	device_tag = &record->asDeviceTag[device_tag_index];
+
+	info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
+	info->dev_id =
+		device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+	struct bios_parser *bp,
+	struct firmware_info *info);
+static enum bp_result get_firmware_info_v2_1(
+	struct bios_parser *bp,
+	struct firmware_info *info);
+static enum bp_result get_firmware_info_v2_2(
+	struct bios_parser *bp,
+	struct firmware_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+	struct dc_bios *dcb,
+	struct firmware_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision;
+
+	if (info && DATA_TABLES(FirmwareInfo)) {
+		header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+			DATA_TABLES(FirmwareInfo));
+		get_atom_data_table_revision(header, &revision);
+		switch (revision.major) {
+		case 1:
+			switch (revision.minor) {
+			case 4:
+				result = get_firmware_info_v1_4(bp, info);
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case 2:
+			switch (revision.minor) {
+			case 1:
+				result = get_firmware_info_v2_1(bp, info);
+				break;
+			case 2:
+				result = get_firmware_info_v2_2(bp, info);
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return result;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+	struct bios_parser *bp,
+	struct firmware_info *info)
+{
+	ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
+		GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
+			DATA_TABLES(FirmwareInfo));
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!firmware_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. We need to convert from 10KHz units into
+	 * KHz units */
+	info->pll_info.crystal_frequency =
+		le16_to_cpu(firmware_info->usReferenceClock) * 10;
+	info->pll_info.min_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+	info->pll_info.max_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+	info->pll_info.min_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+	info->pll_info.max_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+
+	if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+		/* Since there is no information on the SS, report conservative
+		 * value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+	if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information on the SS, report conservative
+		 * value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+	return BP_RESULT_OK;
+}
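+
+/* Worked example of the unit conversion above, with illustrative values: a
+ * raw usReferenceClock of 2700, in 10 kHz units, becomes 2700 * 10 =
+ * 27000 kHz, i.e. a 27 MHz crystal. */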
+
+static enum bp_result get_ss_info_v3_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info);
+
+static enum bp_result get_firmware_info_v2_1(
+	struct bios_parser *bp,
+	struct firmware_info *info)
+{
+	ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
+		GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
+	struct spread_spectrum_info internalSS;
+	uint32_t index;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!firmwareInfo)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. We need to convert from 10KHz units into
+	 * KHz units */
+	info->pll_info.crystal_frequency =
+		le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
+	info->pll_info.min_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
+	info->pll_info.max_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
+	info->pll_info.min_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
+	info->pll_info.max_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
+	info->default_display_engine_pll_frequency =
+		le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
+	info->external_clock_source_frequency_for_dp =
+		le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
+	info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+
+	/* There should be only one entry in the SS info table for Memory Clock
+	 */
+	index = 0;
+	if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+		/* Since there is no information for external SS, report
+		 *  conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+		ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
+		if (internalSS.spread_spectrum_percentage) {
+			info->feature.memory_clk_ss_percentage =
+				internalSS.spread_spectrum_percentage;
+			if (internalSS.type.CENTER_MODE) {
+				/* For centre-spread mode the effective SS
+				 * percentage is half of the reported value,
+				 * rounded up, e.g. 30 -> (30 + 1) / 2 = 15. */
+				++info->feature.memory_clk_ss_percentage;
+				info->feature.memory_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	/* There should be only one entry in the SS info table for Engine Clock
+	 */
+	index = 1;
+	if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information for external SS, report
+		 * conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+		ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
+		if (internalSS.spread_spectrum_percentage) {
+			info->feature.engine_clk_ss_percentage =
+				internalSS.spread_spectrum_percentage;
+			if (internalSS.type.CENTER_MODE) {
+				/* For centre-spread mode the effective SS
+				 * percentage is half of the reported value,
+				 * rounded up. */
+				++info->feature.engine_clk_ss_percentage;
+				info->feature.engine_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v2_2(
+	struct bios_parser *bp,
+	struct firmware_info *info)
+{
+	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+	struct spread_spectrum_info internal_ss;
+	uint32_t index;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
+		DATA_TABLES(FirmwareInfo));
+
+	if (!firmware_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. We need to convert from 10KHz units into
+	 * KHz units */
+	info->pll_info.crystal_frequency =
+		le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
+	info->pll_info.min_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+	info->pll_info.max_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+	info->pll_info.min_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+	info->pll_info.max_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+	info->default_display_engine_pll_frequency =
+		le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
+	info->external_clock_source_frequency_for_dp =
+		le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;
+
+	/* There should be only one entry in the SS info table for Memory Clock
+	 */
+	index = 0;
+	if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+		/* Since there is no information for external SS, report
+		 *  conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+			ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
+		if (internal_ss.spread_spectrum_percentage) {
+			info->feature.memory_clk_ss_percentage =
+					internal_ss.spread_spectrum_percentage;
+			if (internal_ss.type.CENTER_MODE) {
+				/* For centre-spread mode the effective SS
+				 * percentage is half of the reported value,
+				 * rounded up. */
+				++info->feature.memory_clk_ss_percentage;
+				info->feature.memory_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	/* There should be only one entry in the SS info table for Engine Clock
+	 */
+	index = 1;
+	if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information for external SS, report
+		 * conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+			ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
+		if (internal_ss.spread_spectrum_percentage) {
+			info->feature.engine_clk_ss_percentage =
+					internal_ss.spread_spectrum_percentage;
+			if (internal_ss.type.CENTER_MODE) {
+				/* For centre-spread mode the effective SS
+				 * percentage is half of the reported value,
+				 * rounded up. */
+				++info->feature.engine_clk_ss_percentage;
+				info->feature.engine_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	/* Remote Display */
+	info->remote_display_config = firmware_info->ucRemoteDisplayConfig;
+
+	/* Is allowed minimum BL level */
+	info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
+	/* Used starting from CI */
+	info->smu_gpu_pll_output_freq =
+			(uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
+	ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+	uint32_t table_size;
+	uint32_t i;
+	uint32_t table_index = 0;
+
+	if (!ss_info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return BP_RESULT_UNSUPPORTED;
+
+	ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+		DATA_TABLES(ASIC_InternalSS_Info));
+	table_size =
+		(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+				- sizeof(ATOM_COMMON_TABLE_HEADER))
+				/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+				&ss_table_header_include->asSpreadSpectrum[0];
+
+	memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+	for (i = 0; i < table_size; i++) {
+		if (tbl[i].ucClockIndication != (uint8_t) id)
+			continue;
+
+		if (table_index != index) {
+			table_index++;
+			continue;
+		}
+		/* VBIOS introduced new defines for Version 3, with the same
+		 * values as before, so now use the new ones for Version 3.
+		 * This shouldn't affect V3 VBIOSes already in the field, as
+		 * the define values are still the same:
+		 * #define SS_MODE_V3_CENTRE_SPREAD_MASK                0x01
+		 * #define SS_MODE_V3_EXTERNAL_SS_MASK                  0x02
+		 *
+		 * Old VBIOS defines:
+		 * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+		 * #define ATOM_EXTERNAL_SS_MASK                  0x00000002
+		 */
+
+		if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
+			ss_info->type.EXTERNAL = true;
+
+		if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
+			ss_info->type.CENTER_MODE = true;
+
+		/* Older VBIOS (in the field) always provides the SS percentage
+		 * in 0.01% units, so set the divider to 100 */
+		ss_info->spread_percentage_divider = 100;
+
+		/* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
+		if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
+				& tbl[i].ucSpreadSpectrumMode)
+			ss_info->spread_percentage_divider = 1000;
+
+		ss_info->type.STEP_AND_DELAY_INFO = false;
+		/* convert [10KHz] into [KHz] */
+		ss_info->target_clock_range =
+				le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+		ss_info->spread_spectrum_percentage =
+				(uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+		ss_info->spread_spectrum_range =
+				(uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+
+		return BP_RESULT_OK;
+	}
+	return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_transmitter_control(
+	struct dc_bios *dcb,
+	struct bp_transmitter_control *cntl)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.transmitter_control)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+	struct dc_bios *dcb,
+	struct bp_encoder_control *cntl)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.dig_encoder_control)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_adjust_pixel_clock(
+	struct dc_bios *dcb,
+	struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.adjust_display_pll)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+	struct dc_bios *dcb,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_pixel_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+	struct dc_bios *dcb,
+	struct bp_set_dce_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_dce_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
+	struct dc_bios *dcb,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
+			bp, bp_params, enable);
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+	struct dc_bios *dcb,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_crtc_timing)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_program_display_engine_pll(
+	struct dc_bios *dcb,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.program_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.program_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_crtc(
+	struct dc_bios *dcb,
+	enum controller_id id,
+	bool enable)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_crtc)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+	struct dc_bios *dcb,
+	struct bp_crtc_source_select *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.select_crtc_source)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+	struct dc_bios *dcb,
+	enum controller_id controller_id,
+	enum bp_pipe_control_action action)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_disp_power_gating)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+		action);
+}
+
+static bool bios_parser_is_device_id_supported(
+	struct dc_bios *dcb,
+	struct device_id id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	uint32_t mask = get_support_mask_for_device_id(id);
+
+	return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
+}
+
+static enum bp_result bios_parser_crt_control(
+	struct dc_bios *dcb,
+	enum engine_id engine_id,
+	bool enable,
+	uint32_t pixel_clock)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	uint8_t standard;
+
+	if (!bp->cmd_tbl.dac1_encoder_control &&
+		engine_id == ENGINE_ID_DACA)
+		return BP_RESULT_FAILURE;
+	if (!bp->cmd_tbl.dac2_encoder_control &&
+		engine_id == ENGINE_ID_DACB)
+		return BP_RESULT_FAILURE;
+	/* validate params */
+	switch (engine_id) {
+	case ENGINE_ID_DACA:
+	case ENGINE_ID_DACB:
+		break;
+	default:
+		/* unsupported engine */
+		return BP_RESULT_FAILURE;
+	}
+
+	standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
+
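+	/* On enable: program the encoder first, then turn the output on.
+	 * On disable: turn the output off first, then the encoder. */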
+	if (enable) {
+		if (engine_id == ENGINE_ID_DACA) {
+			bp->cmd_tbl.dac1_encoder_control(bp, enable,
+				pixel_clock, standard);
+			if (bp->cmd_tbl.dac1_output_control != NULL)
+				bp->cmd_tbl.dac1_output_control(bp, enable);
+		} else {
+			bp->cmd_tbl.dac2_encoder_control(bp, enable,
+				pixel_clock, standard);
+			if (bp->cmd_tbl.dac2_output_control != NULL)
+				bp->cmd_tbl.dac2_output_control(bp, enable);
+		}
+	} else {
+		if (engine_id == ENGINE_ID_DACA) {
+			if (bp->cmd_tbl.dac1_output_control != NULL)
+				bp->cmd_tbl.dac1_output_control(bp, enable);
+			bp->cmd_tbl.dac1_encoder_control(bp, enable,
+				pixel_clock, standard);
+		} else {
+			if (bp->cmd_tbl.dac2_output_control != NULL)
+				bp->cmd_tbl.dac2_output_control(bp, enable);
+			bp->cmd_tbl.dac2_encoder_control(bp, enable,
+				pixel_clock, standard);
+		}
+	}
+
+	return BP_RESULT_OK;
+}
+
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	ATOM_COMMON_RECORD_HEADER *header;
+	uint32_t offset;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+			!header->ucRecordSize)
+			break;
+
+		if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
+			&& sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
+			return (ATOM_HPD_INT_RECORD *) header;
+
+		offset += header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+/**
+ * Get the I2C information of the given object id.
+ *
+ * Search all records to find the ATOM_I2C_RECORD_TYPE record.
+ */
+static ATOM_I2C_RECORD *get_i2c_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	ATOM_COMMON_RECORD_HEADER *record_header;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER();
+		/* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!record_header)
+			return NULL;
+
+		if (LAST_RECORD_TYPE == record_header->ucRecordType ||
+			0 == record_header->ucRecordSize)
+			break;
+
+		if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
+			sizeof(ATOM_I2C_RECORD) <=
+			record_header->ucRecordSize) {
+			return (ATOM_I2C_RECORD *)record_header;
+		}
+
+		offset += record_header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+static enum bp_result get_ss_info_from_ss_info_table(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info);
+static enum bp_result get_ss_info_from_tbl(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info);
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with the SS_Info table. Except for
+ * ASIC_InternalSS_Info ver 3.1, there is only one entry for each signal/SS
+ * id. However, there is no plan to support multiple spread spectrum entries
+ * for EverGreen.
+ * @param [in] this
+ * @param [in] signal, as_signal_type to be converted to an info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+	struct dc_bios *dcb,
+	enum as_signal_type signal,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	uint32_t clk_id_ss = 0;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision tbl_revision;
+
+	if (!ss_info) /* check for bad input */
+		return BP_RESULT_BADINPUT;
+	/* signal translation */
+	clk_id_ss = signal_to_ss_id(signal);
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		if (!index)
+			return get_ss_info_from_ss_info_table(bp, clk_id_ss,
+				ss_info);
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+		DATA_TABLES(ASIC_InternalSS_Info));
+	get_atom_data_table_revision(header, &tbl_revision);
+
+	switch (tbl_revision.major) {
+	case 2:
+		switch (tbl_revision.minor) {
+		case 1:
+			/* there can not be more than one entry for Internal
+			 * SS Info table version 2.1 */
+			if (!index)
+				return get_ss_info_from_tbl(bp, clk_id_ss,
+						ss_info);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case 3:
+		switch (tbl_revision.minor) {
+		case 1:
+			return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	/* there cannot be more than one entry for the SS Info table */
+	return result;
+}
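+
+/*
+ * Caller-side sketch (illustrative only, not part of this patch's call
+ * flow): because only ASIC_InternalSS_Info ver 3.1 can hold several entries
+ * per signal, a consumer would typically query the entry count first and
+ * then walk the index parameter; apply_ss() below is a hypothetical
+ * consumer:
+ *
+ *	uint32_t i, n = bios_parser_get_ss_entry_number(dcb, signal);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		struct spread_spectrum_info info;
+ *
+ *		if (bios_parser_get_spread_spectrum_info(dcb, signal, i,
+ *				&info) == BP_RESULT_OK)
+ *			apply_ss(&info);
+ *	}
+ */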
+
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *info);
+
+/**
+ * get_ss_info_from_tbl
+ * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS.
+ * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param bp, bios parser handle
+ * @param id, spread spectrum info index
+ * @param ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_tbl(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info)
+{
+	if (!ss_info) /* check for bad input: ss_info must not be NULL */
+		return BP_RESULT_BADINPUT;
+	/* the SS_Info table only supports DP and LVDS */
+	if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+		return get_ss_info_from_ss_info_table(bp, id, ss_info);
+	else
+		return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
+			ss_info);
+}
+
+/**
+ * get_ss_info_from_internal_ss_info_tbl_V2_1
+ * Get spread spectrum information from the ASIC_InternalSS_Info table Ver 2.1
+ * from the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @param info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *info)
+{
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
+	ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+	uint32_t tbl_size, i;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return result;
+
+	header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+		DATA_TABLES(ASIC_InternalSS_Info));
+
+	memset(info, 0, sizeof(struct spread_spectrum_info));
+
+	tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+					/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+					&(header->asSpreadSpectrum[0]);
+	for (i = 0; i < tbl_size; i++) {
+		result = BP_RESULT_NORECORD;
+
+		if (tbl[i].ucClockIndication != (uint8_t)id)
+			continue;
+
+		if (ATOM_EXTERNAL_SS_MASK
+			& tbl[i].ucSpreadSpectrumMode) {
+			info->type.EXTERNAL = true;
+		}
+		if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
+			& tbl[i].ucSpreadSpectrumMode) {
+			info->type.CENTER_MODE = true;
+		}
+		info->type.STEP_AND_DELAY_INFO = false;
+		/* convert [10KHz] into [KHz] */
+		info->target_clock_range =
+			le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+		info->spread_spectrum_percentage =
+			(uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+		info->spread_spectrum_range =
+			(uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+		result = BP_RESULT_OK;
+		break;
+	}
+
+	return result;
+}
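+
+/*
+ * The tbl_size computation above is just "payload bytes divided by entry
+ * size": usStructureSize covers the whole table, so subtracting the 4-byte
+ * ATOM_COMMON_TABLE_HEADER leaves only the ATOM_ASIC_SS_ASSIGNMENT_V2
+ * entries. For example, assuming a 12-byte entry (the exact size is
+ * illustrative), a table with usStructureSize == 52 holds
+ * (52 - 4) / 12 == 4 entries.
+ */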
+
+/**
+ * get_ss_info_from_ss_info_table
+ * Get spread spectrum information from the SS_Info table from the VBIOS;
+ * ss_info must not be NULL.
+ * For the SS_Info table there should not be more than one matching entry.
+ *
+ * @param [in] id, spread spectrum id
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_ss_info_table(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info)
+{
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	ATOM_SPREAD_SPECTRUM_INFO *tbl;
+	ATOM_COMMON_TABLE_HEADER *header;
+	uint32_t table_size;
+	uint32_t i;
+	uint32_t id_local = SS_ID_UNKNOWN;
+	struct atom_data_revision revision;
+
+	/* check that the SS_Info table exists and that ss_info is not NULL */
+	if (!DATA_TABLES(SS_Info) || !ss_info)
+		return result;
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info));
+	get_atom_data_table_revision(header, &revision);
+
+	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+
+	if (1 != revision.major || 2 > revision.minor)
+		return result;
+
+	/* have to convert from Internal_SS format to SS_Info format */
+	switch (id) {
+	case ASIC_INTERNAL_SS_ON_DP:
+		id_local = SS_ID_DP1;
+		break;
+	case ASIC_INTERNAL_SS_ON_LVDS:
+	{
+		struct embedded_panel_info panel_info;
+
+		if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+				== BP_RESULT_OK)
+			id_local = panel_info.ss_id;
+		break;
+	}
+	default:
+		break;
+	}
+
+	if (id_local == SS_ID_UNKNOWN)
+		return result;
+
+	table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+					sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+	for (i = 0; i < table_size; i++) {
+		if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
+			continue;
+
+		memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+		if (ATOM_EXTERNAL_SS_MASK &
+				tbl->asSS_Info[i].ucSpreadSpectrumType)
+			ss_info->type.EXTERNAL = true;
+
+		if (ATOM_SS_CENTRE_SPREAD_MODE_MASK &
+				tbl->asSS_Info[i].ucSpreadSpectrumType)
+			ss_info->type.CENTER_MODE = true;
+
+		ss_info->type.STEP_AND_DELAY_INFO = true;
+		ss_info->spread_spectrum_percentage =
+			(uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
+		ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
+		ss_info->step_and_delay_info.delay =
+			tbl->asSS_Info[i].ucSS_Delay;
+		ss_info->step_and_delay_info.recommended_ref_div =
+			tbl->asSS_Info[i].ucRecommendedRef_Div;
+		ss_info->spread_spectrum_range =
+			(uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
+
+		/* there will be only one entry for each display type in the
+		 * SS_Info table */
+		result = BP_RESULT_OK;
+		break;
+	}
+
+	return result;
+}
+
+static enum bp_result get_embedded_panel_info_v1_2(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info);
+static enum bp_result get_embedded_panel_info_v1_3(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+	struct dc_bios *dcb,
+	struct embedded_panel_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_COMMON_TABLE_HEADER *hdr;
+
+	if (!DATA_TABLES(LCD_Info))
+		return BP_RESULT_FAILURE;
+
+	hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info));
+
+	if (!hdr)
+		return BP_RESULT_BADBIOSTABLE;
+
+	switch (hdr->ucTableFormatRevision) {
+	case 1:
+		switch (hdr->ucTableContentRevision) {
+		case 0:
+		case 1:
+		case 2:
+			return get_embedded_panel_info_v1_2(bp, info);
+		case 3:
+			return get_embedded_panel_info_v1_3(bp, info);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
+static enum bp_result get_embedded_panel_info_v1_2(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info)
+{
+	ATOM_LVDS_INFO_V12 *lvds;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(LVDS_Info))
+		return BP_RESULT_UNSUPPORTED;
+
+	lvds =
+		GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info));
+
+	if (!lvds)
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (1 != lvds->sHeader.ucTableFormatRevision
+		|| 2 > lvds->sHeader.ucTableContentRevision)
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(info, 0, sizeof(struct embedded_panel_info));
+
+	/* We need to convert from 10KHz units into KHz units */
+	info->lcd_timing.pixel_clk =
+		le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+	/* usHActive does not include borders, according to VBIOS team */
+	info->lcd_timing.horizontal_addressable =
+		le16_to_cpu(lvds->sLCDTiming.usHActive);
+	/* usHBlanking_Time includes borders, so we should really be
+	 * subtracting borders during this translation, but LVDS generally
+	 * doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders. */
+	info->lcd_timing.horizontal_blanking_time =
+			le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+	/* usVActive does not include borders, according to VBIOS team */
+	info->lcd_timing.vertical_addressable =
+			le16_to_cpu(lvds->sLCDTiming.usVActive);
+	/* usVBlanking_Time includes borders, so we should really be
+	 * subtracting borders during this translation, but LVDS generally
+	 * doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders. */
+	info->lcd_timing.vertical_blanking_time =
+		le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+	info->lcd_timing.horizontal_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+	info->lcd_timing.horizontal_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+	info->lcd_timing.vertical_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+	info->lcd_timing.vertical_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+	info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+	info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+	info->lcd_timing.misc_info.H_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+	info->lcd_timing.misc_info.V_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+	info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+	info->lcd_timing.misc_info.COMPOSITE_SYNC =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+	info->lcd_timing.misc_info.INTERLACE =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+	info->lcd_timing.misc_info.DOUBLE_CLOCK =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+	info->ss_id = lvds->ucSS_Id;
+
+	{
+		uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
+		/* Get minimum supported refresh rate */
+		if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+			info->supported_rr.REFRESH_RATE_30HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+			info->supported_rr.REFRESH_RATE_40HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+			info->supported_rr.REFRESH_RATE_48HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+			info->supported_rr.REFRESH_RATE_50HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+			info->supported_rr.REFRESH_RATE_60HZ = 1;
+	}
+
+	/* DRR panel support can be reported by VBIOS */
+	if (LCDPANEL_CAP_DRR_SUPPORTED
+			& lvds->ucLCDPanel_SpecialHandlingCap)
+		info->drr_enabled = 1;
+
+	if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+	if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.RGB888 = true;
+
+	info->lcd_timing.misc_info.GREY_LEVEL =
+		(uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
+			lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;
+
+	if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.SPATIAL = true;
+
+	if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.TEMPORAL = true;
+
+	if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.API_ENABLED = true;
+
+	return BP_RESULT_OK;
+}
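+
+/*
+ * A sketch of how a consumer could rebuild a full timing from the fields
+ * filled in above, assuming the blanking fields cover the complete blanking
+ * interval including sync (illustrative only):
+ *
+ *	h_total = info->lcd_timing.horizontal_addressable
+ *			+ info->lcd_timing.horizontal_blanking_time;
+ *	v_total = info->lcd_timing.vertical_addressable
+ *			+ info->lcd_timing.vertical_blanking_time;
+ *	pixel_clk was converted to kHz above, so
+ *	refresh_rate_hz = (info->lcd_timing.pixel_clk * 1000)
+ *			/ (h_total * v_total);
+ */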
+
+static enum bp_result get_embedded_panel_info_v1_3(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info)
+{
+	ATOM_LCD_INFO_V13 *lvds;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(LCD_Info))
+		return BP_RESULT_UNSUPPORTED;
+
+	lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));
+
+	if (!lvds)
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (!((1 == lvds->sHeader.ucTableFormatRevision)
+			&& (3 <= lvds->sHeader.ucTableContentRevision)))
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(info, 0, sizeof(struct embedded_panel_info));
+
+	/* We need to convert from 10KHz units into KHz units */
+	info->lcd_timing.pixel_clk =
+			le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+	/* usHActive does not include borders, according to VBIOS team */
+	info->lcd_timing.horizontal_addressable =
+			le16_to_cpu(lvds->sLCDTiming.usHActive);
+	/* usHBlanking_Time includes borders, so we should really be
+	 * subtracting borders during this translation, but LVDS generally
+	 * doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders. */
+	info->lcd_timing.horizontal_blanking_time =
+		le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+	/* usVActive does not include borders, according to VBIOS team */
+	info->lcd_timing.vertical_addressable =
+		le16_to_cpu(lvds->sLCDTiming.usVActive);
+	/* usVBlanking_Time includes borders, so we should really be
+	 * subtracting borders during this translation, but LVDS generally
+	 * doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders. */
+	info->lcd_timing.vertical_blanking_time =
+		le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+	info->lcd_timing.horizontal_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+	info->lcd_timing.horizontal_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+	info->lcd_timing.vertical_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+	info->lcd_timing.vertical_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+	info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+	info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+	info->lcd_timing.misc_info.H_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+	info->lcd_timing.misc_info.V_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+	info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+	info->lcd_timing.misc_info.COMPOSITE_SYNC =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+	info->lcd_timing.misc_info.INTERLACE =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+	info->lcd_timing.misc_info.DOUBLE_CLOCK =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+	info->ss_id = lvds->ucSS_Id;
+
+	/* DRR panel support can be reported by VBIOS */
+	if (LCDPANEL_CAP_V13_DRR_SUPPORTED
+			& lvds->ucLCDPanel_SpecialHandlingCap)
+		info->drr_enabled = 1;
+
+	/* Get supported refresh rate */
+	if (info->drr_enabled == 1) {
+		uint8_t min_rr =
+				lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
+		uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
+
+		if (min_rr != 0) {
+			if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_30HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_40HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_48HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_50HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_60HZ = 1;
+		} else {
+			if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+				info->supported_rr.REFRESH_RATE_30HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+				info->supported_rr.REFRESH_RATE_40HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+				info->supported_rr.REFRESH_RATE_48HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+				info->supported_rr.REFRESH_RATE_50HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+				info->supported_rr.REFRESH_RATE_60HZ = 1;
+		}
+	}
+
+	if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
+		info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+	if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
+		info->lcd_timing.misc_info.RGB888 = true;
+
+	info->lcd_timing.misc_info.GREY_LEVEL =
+			(uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
+				lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
+
+	return BP_RESULT_OK;
+}
+
+/**
+ * bios_parser_get_encoder_cap_info
+ *
+ * @brief
+ *  Get encoder capability information of input object id
+ *
+ * @param object_id, Object id
+ * @param info, encoder cap information structure
+ *
+ * @return Bios parser result code
+ *
+ */
+static enum bp_result bios_parser_get_encoder_cap_info(
+	struct dc_bios *dcb,
+	struct graphics_object_id object_id,
+	struct bp_encoder_cap_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object;
+	ATOM_ENCODER_CAP_RECORD *record = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	record = get_encoder_cap_record(bp, object);
+	if (!record)
+		return BP_RESULT_NORECORD;
+
+	info->DP_HBR2_CAP = record->usHBR2Cap;
+	info->DP_HBR2_EN = record->usHBR2En;
+	return BP_RESULT_OK;
+}
+
+/**
+ * get_encoder_cap_record
+ *
+ * @brief
+ *  Get encoder cap record for the object
+ *
+ * @param object, ATOM object
+ *
+ * @return atom encoder cap record
+ *
+ * @note
+ *  search all records to find the ATOM_ENCODER_CAP_RECORD record
+ */
+static ATOM_ENCODER_CAP_RECORD *get_encoder_cap_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	ATOM_COMMON_RECORD_HEADER *header;
+	uint32_t offset;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+					+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		offset += header->ucRecordSize;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+				!header->ucRecordSize)
+			break;
+
+		if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
+			continue;
+
+		if (sizeof(ATOM_ENCODER_CAP_RECORD) <= header->ucRecordSize)
+			return (ATOM_ENCODER_CAP_RECORD *)header;
+	}
+
+	return NULL;
+}
+
+static uint32_t get_ss_entry_number(
+	struct bios_parser *bp,
+	uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+	struct bios_parser *bp,
+	uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+	struct bios_parser *bp,
+	uint32_t id);
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+	struct bios_parser *bp,
+	uint32_t id);
+
+/**
+ * bios_parser_get_ss_entry_number
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * table in the VBIOS that match the SS id (to be converted from signal)
+ *
+ * @param[in] signal, as_signal_type to be converted to an SS id
+ * @return number of SS entries that match the signal
+ */
+static uint32_t bios_parser_get_ss_entry_number(
+	struct dc_bios *dcb,
+	enum as_signal_type signal)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	uint32_t ss_id = 0;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision;
+
+	ss_id = signal_to_ss_id(signal);
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+			DATA_TABLES(ASIC_InternalSS_Info));
+	get_atom_data_table_revision(header, &revision);
+
+	switch (revision.major) {
+	case 2:
+		switch (revision.minor) {
+		case 1:
+			return get_ss_entry_number(bp, ss_id);
+		default:
+			break;
+		}
+		break;
+	case 3:
+		switch (revision.minor) {
+		case 1:
+			return
+				get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+						bp, ss_id);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * get_ss_entry_number_from_ss_info_tbl
+ * Get the number of spread spectrum entries from the SS_Info table in the
+ * VBIOS.
+ *
+ * @note There can only be one entry for each id in the SS_Info table
+ *
+ * @param [in] id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+	struct bios_parser *bp,
+	uint32_t id)
+{
+	ATOM_SPREAD_SPECTRUM_INFO *tbl;
+	ATOM_COMMON_TABLE_HEADER *header;
+	uint32_t table_size;
+	uint32_t i;
+	uint32_t number = 0;
+	uint32_t id_local = SS_ID_UNKNOWN;
+	struct atom_data_revision revision;
+
+	/* check that the SS_Info table exists */
+	if (!DATA_TABLES(SS_Info))
+		return number;
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+			DATA_TABLES(SS_Info));
+	get_atom_data_table_revision(header, &revision);
+
+	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+			DATA_TABLES(SS_Info));
+
+	if (1 != revision.major || 2 > revision.minor)
+		return number;
+
+	/* have to convert from Internal_SS format to SS_Info format */
+	switch (id) {
+	case ASIC_INTERNAL_SS_ON_DP:
+		id_local = SS_ID_DP1;
+		break;
+	case ASIC_INTERNAL_SS_ON_LVDS: {
+		struct embedded_panel_info panel_info;
+
+		if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+				== BP_RESULT_OK)
+			id_local = panel_info.ss_id;
+		break;
+	}
+	default:
+		break;
+	}
+
+	if (id_local == SS_ID_UNKNOWN)
+		return number;
+
+	table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+					sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+	for (i = 0; i < table_size; i++)
+		if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
+			number = 1;
+			break;
+		}
+
+	return number;
+}
+
+/**
+ * get_ss_entry_number
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * Ver 2.1 or SS_Info table in the VBIOS.
+ * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
+{
+	if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+		return get_ss_entry_number_from_ss_info_tbl(bp, id);
+
+	return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id);
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_v2_1
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * table Ver 2.1 in the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+	struct bios_parser *bp,
+	uint32_t id)
+{
+	ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include;
+	ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+	uint32_t size;
+	uint32_t i;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return 0;
+
+	header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+			DATA_TABLES(ASIC_InternalSS_Info));
+
+	size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+						/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+				&header_include->asSpreadSpectrum[0];
+	for (i = 0; i < size; i++)
+		if (tbl[i].ucClockIndication == (uint8_t)id)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * table of the VBIOS that match the id
+ *
+ * @param[in] id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+	struct bios_parser *bp,
+	uint32_t id)
+{
+	uint32_t number = 0;
+	ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
+	ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+	uint32_t size;
+	uint32_t i;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return number;
+
+	header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+			DATA_TABLES(ASIC_InternalSS_Info));
+	size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+					sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+				&header_include->asSpreadSpectrum[0];
+
+	for (i = 0; i < size; i++)
+		if (tbl[i].ucClockIndication == (uint8_t)id)
+			number++;
+
+	return number;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ *  to get the GPIO PIN INFO, we need:
+ *  1. get the GPIO_ID from other object table, see GetHPDInfo()
+ *  2. in DATA_TABLE.GPIO_Pin_LUT, search all records to get the register A
+ *  offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+	struct dc_bios *dcb,
+	uint32_t gpio_id,
+	struct gpio_pin_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_GPIO_PIN_LUT *header;
+	uint32_t count = 0;
+	uint32_t i = 0;
+
+	if (!DATA_TABLES(GPIO_Pin_LUT))
+		return BP_RESULT_BADBIOSTABLE;
+
+	header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
+	if (!header)
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
+			> le16_to_cpu(header->sHeader.usStructureSize))
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (1 != header->sHeader.ucTableContentRevision)
+		return BP_RESULT_UNSUPPORTED;
+
+	count = (le16_to_cpu(header->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+				/ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+	for (i = 0; i < count; ++i) {
+		if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
+			continue;
+
+		info->offset =
+			(uint32_t) le16_to_cpu(header->asGPIO_Pin[i].usGpioPin_AIndex);
+		info->offset_y = info->offset + 2;
+		info->offset_en = info->offset + 1;
+		info->offset_mask = info->offset - 1;
+
+		info->mask = (uint32_t) (1 <<
+			header->asGPIO_Pin[i].ucGpioPinBitShift);
+		info->mask_y = info->mask + 2;
+		info->mask_en = info->mask + 1;
+		info->mask_mask = info->mask - 1;
+
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+	ATOM_I2C_RECORD *record,
+	struct graphics_object_i2c_info *info)
+{
+	ATOM_GPIO_I2C_INFO *header;
+	uint32_t count = 0;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	/* get the GPIO_I2C info */
+	if (!DATA_TABLES(GPIO_I2C_Info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));
+	if (!header)
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_I2C_ASSIGMENT)
+			> le16_to_cpu(header->sHeader.usStructureSize))
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (1 != header->sHeader.ucTableContentRevision)
+		return BP_RESULT_UNSUPPORTED;
+
+	/* get data count */
+	count = (le16_to_cpu(header->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+				/ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+	if (count < record->sucI2cId.bfI2C_LineMux)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* get the GPIO_I2C_INFO */
+	info->i2c_hw_assist = record->sucI2cId.bfHW_Capable;
+	info->i2c_line = record->sucI2cId.bfI2C_LineMux;
+	info->i2c_engine_id = record->sucI2cId.bfHW_EngineID;
+	info->i2c_slave_address = record->ucI2CAddr;
+
+	info->gpio_info.clk_mask_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex);
+	info->gpio_info.clk_en_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex);
+	info->gpio_info.clk_y_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex);
+	info->gpio_info.clk_a_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex);
+	info->gpio_info.data_mask_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex);
+	info->gpio_info.data_en_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex);
+	info->gpio_info.data_y_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex);
+	info->gpio_info.data_a_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex);
+
+	info->gpio_info.clk_mask_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkMaskShift;
+	info->gpio_info.clk_en_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkEnShift;
+	info->gpio_info.clk_y_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkY_Shift;
+	info->gpio_info.clk_a_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkA_Shift;
+	info->gpio_info.data_mask_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataMaskShift;
+	info->gpio_info.data_en_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataEnShift;
+	info->gpio_info.data_y_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataY_Shift;
+	info->gpio_info.data_a_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataA_Shift;
+
+	return BP_RESULT_OK;
+}
+
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+	struct graphics_object_id id)
+{
+	uint32_t offset;
+	ATOM_OBJECT_TABLE *tbl;
+	uint32_t i;
+
+	switch (id.type) {
+	case OBJECT_TYPE_ENCODER:
+		offset = le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+		break;
+
+	case OBJECT_TYPE_CONNECTOR:
+		offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+		break;
+
+	case OBJECT_TYPE_ROUTER:
+		offset = le16_to_cpu(bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
+		break;
+
+	case OBJECT_TYPE_GENERIC:
+		if (bp->object_info_tbl.revision.minor < 3)
+			return NULL;
+		offset = le16_to_cpu(bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
+		break;
+
+	default:
+		return NULL;
+	}
+
+	offset += bp->object_info_tbl_offset;
+
+	tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0; i < tbl->ucNumberOfObjects; i++)
+		if (dal_graphics_object_id_is_equal(id,
+				object_id_from_bios_object_id(
+						le16_to_cpu(tbl->asObjects[i].usObjectID))))
+			return &tbl->asObjects[i];
+
+	return NULL;
+}
+
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+	ATOM_OBJECT *object, uint16_t **id_list)
+{
+	uint32_t offset;
+	uint8_t *number;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return 0;
+	}
+
+	offset = le16_to_cpu(object->usSrcDstTableOffset)
+						+ bp->object_info_tbl_offset;
+
+	number = GET_IMAGE(uint8_t, offset);
+	if (!number)
+		return 0;
+
+	offset += sizeof(uint8_t);
+	offset += sizeof(uint16_t) * (*number);
+
+	number = GET_IMAGE(uint8_t, offset);
+	if ((!number) || (!*number))
+		return 0;
+
+	offset += sizeof(uint8_t);
+	*id_list = (uint16_t *)get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+	if (!*id_list)
+		return 0;
+
+	return *number;
+}
+
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+	uint16_t **id_list)
+{
+	uint32_t offset;
+	uint8_t *number;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return 0;
+	}
+
+	offset = le16_to_cpu(object->usSrcDstTableOffset)
+					+ bp->object_info_tbl_offset;
+
+	number = GET_IMAGE(uint8_t, offset);
+	if (!number)
+		return 0;
+
+	offset += sizeof(uint8_t);
+	*id_list = (uint16_t *)get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+	if (!*id_list)
+		return 0;
+
+	return *number;
+}
+
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	uint8_t *number;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
+		return 0;
+	}
+
+	offset = le16_to_cpu(object->usSrcDstTableOffset)
+					+ bp->object_info_tbl_offset;
+
+	number = GET_IMAGE(uint8_t, offset);
+	if (!number)
+		return 0;
+
+	offset += sizeof(uint8_t);
+	offset += sizeof(uint16_t) * (*number);
+
+	number = GET_IMAGE(uint8_t, offset);
+
+	if (!number)
+		return 0;
+
+	return *number;
+}
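+
+/*
+ * The three helpers above all decode the same packed SrcDstTable layout
+ * found at object->usSrcDstTableOffset:
+ *
+ *	uint8_t  number_of_src;
+ *	uint16_t src_object_ids[number_of_src];
+ *	uint8_t  number_of_dst;
+ *	uint16_t dst_object_ids[number_of_dst];
+ *
+ * get_src_obj_list() reads the first count/list pair, while
+ * get_dest_obj_list() and get_dst_number_from_object() step over it to
+ * reach the second.
+ */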
+
+static struct graphics_object_id object_id_from_bios_object_id(
+	uint32_t bios_object_id)
+{
+	enum object_type type;
+	enum object_enum_id enum_id;
+	struct graphics_object_id go_id = { 0 };
+
+	type = object_type_from_bios_object_id(bios_object_id);
+
+	if (OBJECT_TYPE_UNKNOWN == type)
+		return go_id;
+
+	enum_id = enum_id_from_bios_object_id(bios_object_id);
+
+	if (ENUM_ID_UNKNOWN == enum_id)
+		return go_id;
+
+	go_id = dal_graphics_object_id_init(
+			id_from_bios_object_id(type, bios_object_id), enum_id, type);
+
+	return go_id;
+}
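+
+/*
+ * Worked example for the decode above, assuming the usual ObjectID.h bit
+ * layout (object id in bits 7:0, enum id in bits 10:8, object type in bits
+ * 14:12; the exact masks come from OBJECT_ID_MASK, ENUM_ID_MASK and
+ * OBJECT_TYPE_MASK): a bios_object_id of 0x2101 decodes to object type 0x2,
+ * enum id 0x1 and object id 0x01.
+ */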
+
+static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
+				>> OBJECT_TYPE_SHIFT;
+	enum object_type object_type;
+
+	switch (bios_object_type) {
+	case GRAPH_OBJECT_TYPE_GPU:
+		object_type = OBJECT_TYPE_GPU;
+		break;
+	case GRAPH_OBJECT_TYPE_ENCODER:
+		object_type = OBJECT_TYPE_ENCODER;
+		break;
+	case GRAPH_OBJECT_TYPE_CONNECTOR:
+		object_type = OBJECT_TYPE_CONNECTOR;
+		break;
+	case GRAPH_OBJECT_TYPE_ROUTER:
+		object_type = OBJECT_TYPE_ROUTER;
+		break;
+	case GRAPH_OBJECT_TYPE_GENERIC:
+		object_type = OBJECT_TYPE_GENERIC;
+		break;
+	default:
+		object_type = OBJECT_TYPE_UNKNOWN;
+		break;
+	}
+
+	return object_type;
+}
+
+static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_enum_id =
+			(bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	enum object_enum_id id;
+
+	switch (bios_enum_id) {
+	case GRAPH_OBJECT_ENUM_ID1:
+		id = ENUM_ID_1;
+		break;
+	case GRAPH_OBJECT_ENUM_ID2:
+		id = ENUM_ID_2;
+		break;
+	case GRAPH_OBJECT_ENUM_ID3:
+		id = ENUM_ID_3;
+		break;
+	case GRAPH_OBJECT_ENUM_ID4:
+		id = ENUM_ID_4;
+		break;
+	case GRAPH_OBJECT_ENUM_ID5:
+		id = ENUM_ID_5;
+		break;
+	case GRAPH_OBJECT_ENUM_ID6:
+		id = ENUM_ID_6;
+		break;
+	case GRAPH_OBJECT_ENUM_ID7:
+		id = ENUM_ID_7;
+		break;
+	default:
+		id = ENUM_ID_UNKNOWN;
+		break;
+	}
+
+	return id;
+}
+
+static uint32_t id_from_bios_object_id(enum object_type type,
+	uint32_t bios_object_id)
+{
+	switch (type) {
+	case OBJECT_TYPE_GPU:
+		return gpu_id_from_bios_object_id(bios_object_id);
+	case OBJECT_TYPE_ENCODER:
+		return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
+	case OBJECT_TYPE_CONNECTOR:
+		return (uint32_t)connector_id_from_bios_object_id(
+				bios_object_id);
+	case OBJECT_TYPE_GENERIC:
+		return generic_id_from_bios_object_id(bios_object_id);
+	default:
+		return 0;
+	}
+}
+
+static enum connector_id connector_id_from_bios_object_id(
+	uint32_t bios_object_id)
+{
+	uint32_t bios_connector_id = gpu_id_from_bios_object_id(bios_object_id);
+
+	enum connector_id id;
+
+	switch (bios_connector_id) {
+	case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+		id = CONNECTOR_ID_SINGLE_LINK_DVII;
+		break;
+	case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+		id = CONNECTOR_ID_DUAL_LINK_DVII;
+		break;
+	case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+		id = CONNECTOR_ID_SINGLE_LINK_DVID;
+		break;
+	case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+		id = CONNECTOR_ID_DUAL_LINK_DVID;
+		break;
+	case CONNECTOR_OBJECT_ID_VGA:
+		id = CONNECTOR_ID_VGA;
+		break;
+	case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+		id = CONNECTOR_ID_HDMI_TYPE_A;
+		break;
+	case CONNECTOR_OBJECT_ID_LVDS:
+		id = CONNECTOR_ID_LVDS;
+		break;
+	case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
+		id = CONNECTOR_ID_PCIE;
+		break;
+	case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
+		id = CONNECTOR_ID_HARDCODE_DVI;
+		break;
+	case CONNECTOR_OBJECT_ID_DISPLAYPORT:
+		id = CONNECTOR_ID_DISPLAY_PORT;
+		break;
+	case CONNECTOR_OBJECT_ID_eDP:
+		id = CONNECTOR_ID_EDP;
+		break;
+	case CONNECTOR_OBJECT_ID_MXM:
+		id = CONNECTOR_ID_MXM;
+		break;
+	default:
+		id = CONNECTOR_ID_UNKNOWN;
+		break;
+	}
+
+	return id;
+}
+
+static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
+	enum encoder_id id;
+
+	switch (bios_encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		id = ENCODER_ID_INTERNAL_LVDS;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+		id = ENCODER_ID_INTERNAL_TMDS1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
+		id = ENCODER_ID_INTERNAL_TMDS2;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		id = ENCODER_ID_INTERNAL_DAC1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+		id = ENCODER_ID_INTERNAL_DAC2;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		id = ENCODER_ID_INTERNAL_LVTM1;
+		break;
+	case ENCODER_OBJECT_ID_HDMI_INTERNAL:
+		id = ENCODER_ID_INTERNAL_HDMI;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
+		break;
+	case ENCODER_OBJECT_ID_MVPU_FPGA:
+		id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+		id = ENCODER_ID_INTERNAL_DDI;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		id = ENCODER_ID_INTERNAL_UNIPHY;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		id = ENCODER_ID_INTERNAL_UNIPHY1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		id = ENCODER_ID_INTERNAL_UNIPHY2;
+		break;
+	case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
+		id = ENCODER_ID_EXTERNAL_NUTMEG;
+		break;
+	case ENCODER_OBJECT_ID_TRAVIS:
+		id = ENCODER_ID_EXTERNAL_TRAVIS;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		id = ENCODER_ID_INTERNAL_UNIPHY3;
+		break;
+	default:
+		id = ENCODER_ID_UNKNOWN;
+		ASSERT(0);
+		break;
+	}
+
+	return id;
+}
+
+uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+}
+
+enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);
+
+	enum generic_id id;
+
+	switch (bios_generic_id) {
+	case GENERIC_OBJECT_ID_MXM_OPM:
+		id = GENERIC_ID_MXM_OPM;
+		break;
+	case GENERIC_OBJECT_ID_GLSYNC:
+		id = GENERIC_ID_GLSYNC;
+		break;
+	case GENERIC_OBJECT_ID_STEREO_PIN:
+		id = GENERIC_ID_STEREO;
+		break;
+	default:
+		id = GENERIC_ID_UNKNOWN;
+		break;
+	}
+
+	return id;
+}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+	struct device_id result_device_id;
+
+	switch (device_id) {
+	case ATOM_DEVICE_LCD1_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_LCD;
+		result_device_id.enum_id = 1;
+		break;
+
+	case ATOM_DEVICE_LCD2_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_LCD;
+		result_device_id.enum_id = 2;
+		break;
+
+	case ATOM_DEVICE_CRT1_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_CRT;
+		result_device_id.enum_id = 1;
+		break;
+
+	case ATOM_DEVICE_CRT2_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_CRT;
+		result_device_id.enum_id = 2;
+		break;
+
+	case ATOM_DEVICE_DFP1_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 1;
+		break;
+
+	case ATOM_DEVICE_DFP2_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 2;
+		break;
+
+	case ATOM_DEVICE_DFP3_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 3;
+		break;
+
+	case ATOM_DEVICE_DFP4_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 4;
+		break;
+
+	case ATOM_DEVICE_DFP5_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 5;
+		break;
+
+	case ATOM_DEVICE_DFP6_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 6;
+		break;
+
+	default:
+		BREAK_TO_DEBUGGER(); /* Invalid device Id */
+		result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+		result_device_id.enum_id = 0;
+	}
+	return result_device_id;
+}
+
+static void get_atom_data_table_revision(
+	ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+	struct atom_data_revision *tbl_revision)
+{
+	if (!tbl_revision)
+		return;
+
+	/* initialize the revision to 0, which is an invalid revision */
+	tbl_revision->major = 0;
+	tbl_revision->minor = 0;
+
+	if (!atom_data_tbl)
+		return;
+
+	tbl_revision->major =
+			(uint32_t) GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
+	tbl_revision->minor =
+			(uint32_t) GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
+}
+
+static uint32_t signal_to_ss_id(enum as_signal_type signal)
+{
+	uint32_t clk_id_ss = 0;
+
+	switch (signal) {
+	case AS_SIGNAL_TYPE_DVI:
+		clk_id_ss = ASIC_INTERNAL_SS_ON_TMDS;
+		break;
+	case AS_SIGNAL_TYPE_HDMI:
+		clk_id_ss = ASIC_INTERNAL_SS_ON_HDMI;
+		break;
+	case AS_SIGNAL_TYPE_LVDS:
+		clk_id_ss = ASIC_INTERNAL_SS_ON_LVDS;
+		break;
+	case AS_SIGNAL_TYPE_DISPLAY_PORT:
+		clk_id_ss = ASIC_INTERNAL_SS_ON_DP;
+		break;
+	case AS_SIGNAL_TYPE_GPU_PLL:
+		clk_id_ss = ASIC_INTERNAL_GPUPLL_SS;
+		break;
+	default:
+		break;
+	}
+	return clk_id_ss;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+	enum dal_device_type device_type = device_id.device_type;
+	uint32_t enum_id = device_id.enum_id;
+
+	switch (device_type) {
+	case DEVICE_TYPE_LCD:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DEVICE_LCD1_SUPPORT;
+		case 2:
+			return ATOM_DEVICE_LCD2_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	case DEVICE_TYPE_CRT:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DEVICE_CRT1_SUPPORT;
+		case 2:
+			return ATOM_DEVICE_CRT2_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	case DEVICE_TYPE_DFP:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DEVICE_DFP1_SUPPORT;
+		case 2:
+			return ATOM_DEVICE_DFP2_SUPPORT;
+		case 3:
+			return ATOM_DEVICE_DFP3_SUPPORT;
+		case 4:
+			return ATOM_DEVICE_DFP4_SUPPORT;
+		case 5:
+			return ATOM_DEVICE_DFP5_SUPPORT;
+		case 6:
+			return ATOM_DEVICE_DFP6_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	case DEVICE_TYPE_CV:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DEVICE_CV_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	case DEVICE_TYPE_TV:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DEVICE_TV1_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Unidentified device ID, return empty support mask. */
+	return 0;
+}
+
+/**
+ *  HwContext interface for writing MM registers
+ */
+
+static bool i2c_read(
+	struct bios_parser *bp,
+	struct graphics_object_i2c_info *i2c_info,
+	uint8_t *buffer,
+	uint32_t length)
+{
+	struct ddc *ddc;
+	uint8_t offset[2] = { 0, 0 };
+	bool result = false;
+	struct i2c_command cmd;
+	struct gpio_ddc_hw_info hw_info = {
+		i2c_info->i2c_hw_assist,
+		i2c_info->i2c_line };
+
+	ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
+		i2c_info->gpio_info.clk_a_register_index,
+		(1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
+
+	if (!ddc)
+		return result;
+
+	/* Using SW engine */
+	cmd.engine = I2C_COMMAND_ENGINE_SW;
+	cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+	{
+		struct i2c_payload payloads[] = {
+				{
+						.address = i2c_info->i2c_slave_address >> 1,
+						.data = offset,
+						.length = sizeof(offset),
+						.write = true
+				},
+				{
+						.address = i2c_info->i2c_slave_address >> 1,
+						.data = buffer,
+						.length = length,
+						.write = false
+				}
+		};
+
+		cmd.payloads = payloads;
+		cmd.number_of_payloads = ARRAY_SIZE(payloads);
+
+		/* TODO route this through drm i2c_adapter */
+		result = dal_i2caux_submit_i2c_command(
+				ddc->ctx->i2caux,
+				ddc,
+				&cmd);
+	}
+
+	dal_gpio_destroy_ddc(&ddc);
+
+	return result;
+}
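+
+/*
+ * Note on the transaction shape above: i2c_read() first writes a two-byte
+ * zero offset and then reads back length bytes, i.e. it assumes an
+ * EEPROM-style device addressed from offset 0. This matches its caller
+ * below, which fetches the external display connection info table.
+ */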
+
+/**
+ * Read the external display connection info table through i2c and
+ * validate the GUID and checksum.
+ *
+ * @return enum bp_result whether all data was successfully read
+ */
+static enum bp_result get_ext_display_connection_info(
+	struct bios_parser *bp,
+	ATOM_OBJECT *opm_object,
+	ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
+{
+	bool config_tbl_present = false;
+	ATOM_I2C_RECORD *i2c_record = NULL;
+	uint32_t i = 0;
+
+	if (opm_object == NULL)
+		return BP_RESULT_BADINPUT;
+
+	i2c_record = get_i2c_record(bp, opm_object);
+
+	if (i2c_record != NULL) {
+		ATOM_GPIO_I2C_INFO *gpio_i2c_header;
+		struct graphics_object_i2c_info i2c_info;
+
+		gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
+				bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
+
+		if (NULL == gpio_i2c_header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
+				BP_RESULT_OK)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (i2c_read(bp,
+			     &i2c_info,
+			     (uint8_t *)ext_display_connection_info_tbl,
+			     sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
+			config_tbl_present = true;
+		}
+	}
+
+	/* Validate GUID */
+	if (config_tbl_present)
+		for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
+			if (ext_display_connection_info_tbl->ucGuid[i]
+			    != ext_display_connection_guid[i]) {
+				config_tbl_present = false;
+				break;
+			}
+		}
+
+	/* Validate checksum */
+	if (config_tbl_present) {
+		uint8_t check_sum = 0;
+		uint8_t *buf =
+				(uint8_t *)ext_display_connection_info_tbl;
+
+		for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
+				i++) {
+			check_sum += buf[i];
+		}
+
+		if (check_sum != 0)
+			config_tbl_present = false;
+	}
+
+	if (config_tbl_present)
+		return BP_RESULT_OK;
+	else
+		return BP_RESULT_FAILURE;
+}
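+
+/*
+ * Checksum convention used above: the table is valid when all of its bytes,
+ * including the stored checksum byte, sum to 0 modulo 256 (check_sum is a
+ * uint8_t, so it wraps). Equivalently, the checksum byte holds the two's
+ * complement of the sum of the remaining bytes; e.g. if those bytes sum to
+ * 0x60, the stored checksum must be 0xA0, since 0x60 + 0xA0 == 0x100 == 0
+ * (mod 256).
+ */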
+
+/*
+ * Gets the first device ID in the same group as the given ID for
+ * enumerating. For instance, if any DFP device ID is passed, returns the
+ * device ID for DFP1.
+ *
+ * Returns the first device ID in the same group as the passed device ID, or
+ * 0 if no matching device group is found.
+ */
+static uint32_t enum_first_device_id(uint32_t dev_id)
+{
+	/* Return the first in the group that this ID belongs to. */
+	if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
+		return ATOM_DEVICE_CRT1_SUPPORT;
+	else if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
+		return ATOM_DEVICE_DFP1_SUPPORT;
+	else if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
+		return ATOM_DEVICE_LCD1_SUPPORT;
+	else if (dev_id & ATOM_DEVICE_TV_SUPPORT)
+		return ATOM_DEVICE_TV1_SUPPORT;
+	else if (dev_id & ATOM_DEVICE_CV_SUPPORT)
+		return ATOM_DEVICE_CV_SUPPORT;
+
+	/* No group found for this device ID; no matching support flag. */
+	dm_error("%s: incorrect input %d\n", __func__, dev_id);
+	return 0;
+}
+
+/*
+ * Gets the next device ID in the group for a given device ID.
+ *
+ * dev_id is the device ID currently being enumerated.
+ *
+ * Returns the next device ID in the group, or 0 if no such device exists.
+ */
+static uint32_t enum_next_dev_id(uint32_t dev_id)
+{
+	/* Get next device ID in the group. */
+	switch (dev_id) {
+	case ATOM_DEVICE_CRT1_SUPPORT:
+		return ATOM_DEVICE_CRT2_SUPPORT;
+	case ATOM_DEVICE_LCD1_SUPPORT:
+		return ATOM_DEVICE_LCD2_SUPPORT;
+	case ATOM_DEVICE_DFP1_SUPPORT:
+		return ATOM_DEVICE_DFP2_SUPPORT;
+	case ATOM_DEVICE_DFP2_SUPPORT:
+		return ATOM_DEVICE_DFP3_SUPPORT;
+	case ATOM_DEVICE_DFP3_SUPPORT:
+		return ATOM_DEVICE_DFP4_SUPPORT;
+	case ATOM_DEVICE_DFP4_SUPPORT:
+		return ATOM_DEVICE_DFP5_SUPPORT;
+	case ATOM_DEVICE_DFP5_SUPPORT:
+		return ATOM_DEVICE_DFP6_SUPPORT;
+	}
+
+	/* Done enumerating through devices. */
+	return 0;
+}
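+
+/*
+ * Together, enum_first_device_id() and enum_next_dev_id() support a simple
+ * walk over one device group, which is exactly how get_patched_device_tag()
+ * below probes for a free device ID (base_id stands in for the device tag
+ * being remapped):
+ *
+ *	for (dev_id = enum_first_device_id(base_id);
+ *			dev_id != 0;
+ *			dev_id = enum_next_dev_id(dev_id)) {
+ *		if (device_support & dev_id)
+ *			break;
+ *	}
+ */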
+
+/*
+ * Fills in the new device tag record for a patched BIOS object.
+ *
+ * [IN] ext_display_path - External display path to copy the device tag from.
+ * [IN] device_support - Bit vector of device ID support flags.
+ * [OUT] device_tag - Device tag structure to fill with patched data.
+ *
+ * Returns true if a compatible device ID was found, false otherwise.
+ */
+static bool get_patched_device_tag(
+	struct bios_parser *bp,
+	EXT_DISPLAY_PATH *ext_display_path,
+	uint32_t device_support,
+	ATOM_CONNECTOR_DEVICE_TAG *device_tag)
+{
+	uint32_t dev_id;
+
+	/* Use fallback behaviour if not supported. */
+	if (!bp->remap_device_tags) {
+		device_tag->ulACPIDeviceEnum =
+				cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+		device_tag->usDeviceID =
+				cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
+		return true;
+	}
+
+	/* Find the first unused in the same group. */
+	dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
+	while (dev_id != 0) {
+		/* Assign this device ID if supported. */
+		if ((device_support & dev_id) != 0) {
+			device_tag->ulACPIDeviceEnum =
+					cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+			device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
+			return true;
+		}
+
+		dev_id = enum_next_dev_id(dev_id);
+	}
+
+	/* No compatible device ID found. */
+	return false;
+}
+
+/*
+ * Adds a device tag to a BIOS object's device tag record if a matching
+ * device ID is supported.
+ *
+ * object - Pointer to the BIOS object to add the device tag to.
+ * ext_display_path - Display path to retrieve the base device ID from.
+ * device_support - Pointer to the bit vector of supported device IDs.
+ */
+static void add_device_tag_from_ext_display_path(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object,
+	EXT_DISPLAY_PATH *ext_display_path,
+	uint32_t *device_support)
+{
+	/* Get device tag record for object. */
+	ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
+	enum bp_result result =
+			bios_parser_get_device_tag_record(
+					bp, object, &device_tag_record);
+
+	if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
+			&& (result == BP_RESULT_OK)) {
+		uint8_t index;
+
+		if ((device_tag_record->ucNumberOfDevice == 1) &&
+				(le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
+			/* Work around a bug in current VBIOS releases where
+			 * ucNumberOfDevice = 1 but there is no actual device
+			 * tag data. This w/a is temporary until the updated
+			 * VBIOS is distributed. */
+			device_tag_record->ucNumberOfDevice =
+					device_tag_record->ucNumberOfDevice - 1;
+		}
+
+		/* Attempt to find a matching device ID. */
+		index = device_tag_record->ucNumberOfDevice;
+		device_tag = &device_tag_record->asDeviceTag[index];
+		if (get_patched_device_tag(
+				bp,
+				ext_display_path,
+				*device_support,
+				device_tag)) {
+			/* Update cached device support to remove assigned ID.
+			 */
+			*device_support &= ~le16_to_cpu(device_tag->usDeviceID);
+			device_tag_record->ucNumberOfDevice++;
+		}
+	}
+}
+
+/*
+ * Read out a single EXT_DISPLAY_PATH from the external display connection info
+ * table. The specific entry is selected by the enum id embedded in the
+ * passed bios_object_id.
+ *
+ * Returns the EXT_DISPLAY_PATH describing a single configuration table
+ * entry.
+ */
+
+#define INVALID_CONNECTOR 0xffff
+
+static EXT_DISPLAY_PATH *get_ext_display_path_entry(
+	ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
+	uint32_t bios_object_id)
+{
+	EXT_DISPLAY_PATH *ext_display_path;
+	uint32_t ext_display_path_index =
+			((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
+
+	if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
+		return NULL;
+
+	ext_display_path = &config_table->sPath[ext_display_path_index];
+
+	if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
+		ext_display_path->usDeviceConnector = cpu_to_le16(0);
+
+	return ext_display_path;
+}
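+
+/*
+ * The index computation above is simply the connector's enum id minus one:
+ * for example, an MXM connector whose bios_object_id carries
+ * GRAPH_OBJECT_ENUM_ID2 maps to config_table->sPath[1].
+ */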
+
+/*
+ * Get AUX/DDC information of input object id
+ *
+ * Search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+ * record.
+ */
+static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	ATOM_COMMON_RECORD_HEADER *header;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER();
+		/* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+					+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+				0 == header->ucRecordSize)
+			break;
+
+		if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
+				header->ucRecordType &&
+				sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
+				header->ucRecordSize)
+			return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
+
+		offset += header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+/*
+ * Get HPD pin information of input object id
+ *
+ * Search all records to find the ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+ * record.
+ */
+static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	ATOM_COMMON_RECORD_HEADER *header;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER();
+		/* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+					+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+				0 == header->ucRecordSize)
+			break;
+
+		if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
+				header->ucRecordType &&
+				sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
+				header->ucRecordSize)
+			return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
+
+		offset += header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+/*
+ * Check whether we need to patch the VBIOS connector info table with
+ * data from an external display connection info table.  This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module).  With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID.  The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ */
+static enum bp_result patch_bios_image_from_ext_display_connection_info(
+	struct bios_parser *bp)
+{
+	ATOM_OBJECT_TABLE *connector_tbl;
+	uint32_t connector_tbl_offset;
+	struct graphics_object_id object_id;
+	ATOM_OBJECT *object;
+	ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
+	EXT_DISPLAY_PATH *ext_display_path;
+	ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
+	ATOM_I2C_RECORD *i2c_record = NULL;
+	ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
+	ATOM_HPD_INT_RECORD *hpd_record = NULL;
+	ATOM_OBJECT_TABLE *encoder_table;
+	uint32_t encoder_table_offset;
+	ATOM_OBJECT *opm_object = NULL;
+	uint32_t i = 0;
+	struct graphics_object_id opm_object_id =
+			dal_graphics_object_id_init(
+					GENERIC_ID_MXM_OPM,
+					ENUM_ID_1,
+					OBJECT_TYPE_GENERIC);
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
+	uint32_t cached_device_support =
+			le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
+
+	uint32_t dst_number;
+	uint16_t *dst_object_id_list;
+
+	opm_object = get_bios_object(bp, opm_object_id);
+	if (!opm_object)
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(&ext_display_connection_info_tbl, 0,
+			sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
+
+	connector_tbl_offset = bp->object_info_tbl_offset
+			+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+	connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+	/* Read Connector info table from EEPROM through i2c */
+	if (get_ext_display_connection_info(bp,
+					    opm_object,
+					    &ext_display_connection_info_tbl) != BP_RESULT_OK) {
+
+		dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+				"%s: Failed to read Connection Info Table", __func__);
+		return BP_RESULT_UNSUPPORTED;
+	}
+
+	/* Get pointer to AUX/DDC and HPD LUTs */
+	aux_ddc_lut_record =
+			get_ext_connector_aux_ddc_lut_record(bp, opm_object);
+	hpd_pin_lut_record =
+			get_ext_connector_hpd_pin_lut_record(bp, opm_object);
+
+	if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
+		return BP_RESULT_UNSUPPORTED;
+
+	/* Cache support bits for currently unmapped device types. */
+	if (bp->remap_device_tags) {
+		for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
+			uint32_t j;
+			/* Remove support for all non-MXM connectors. */
+			object = &connector_tbl->asObjects[i];
+			object_id = object_id_from_bios_object_id(
+					le16_to_cpu(object->usObjectID));
+			if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+					(CONNECTOR_ID_MXM == object_id.id))
+				continue;
+
+			/* Remove support for all device tags. */
+			if (bios_parser_get_device_tag_record(
+					bp, object, &dev_tag_record) != BP_RESULT_OK)
+				continue;
+
+			for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
+				ATOM_CONNECTOR_DEVICE_TAG *device_tag =
+						&dev_tag_record->asDeviceTag[j];
+				cached_device_support &=
+						~le16_to_cpu(device_tag->usDeviceID);
+			}
+		}
+	}
+
+	/* Find all MXM connector objects and patch them with connector info
+	 * from the external display connection info table. */
+	for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+		uint32_t j;
+
+		object = &connector_tbl->asObjects[i];
+		object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+		if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+				(CONNECTOR_ID_MXM != object_id.id))
+			continue;
+
+		/* Get the correct connection info table entry based on the enum
+		 * id. */
+		ext_display_path = get_ext_display_path_entry(
+				&ext_display_connection_info_tbl,
+				le16_to_cpu(object->usObjectID));
+		if (!ext_display_path)
+			return BP_RESULT_FAILURE;
+
+		/* Patch device connector ID */
+		object->usObjectID =
+				cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
+
+		/* Patch device tag, ulACPIDeviceEnum. */
+		add_device_tag_from_ext_display_path(
+				bp,
+				object,
+				ext_display_path,
+				&cached_device_support);
+
+		/* Patch HPD info */
+		if (ext_display_path->ucExtHPDPINLutIndex <
+				MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
+			hpd_record = get_hpd_record(bp, object);
+			if (hpd_record) {
+				uint8_t index =
+						ext_display_path->ucExtHPDPINLutIndex;
+				hpd_record->ucHPDIntGPIOID =
+						hpd_pin_lut_record->ucHPDPINMap[index];
+			} else {
+				BREAK_TO_DEBUGGER();
+				/* Invalid hpd record */
+				return BP_RESULT_FAILURE;
+			}
+		}
+
+		/* Patch I2C/AUX info */
+		if (ext_display_path->ucExtAUXDDCLutIndex <
+				MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
+			i2c_record = get_i2c_record(bp, object);
+			if (i2c_record) {
+				uint8_t index =
+						ext_display_path->ucExtAUXDDCLutIndex;
+				i2c_record->sucI2cId =
+						aux_ddc_lut_record->ucAUXDDCMap[index];
+			} else {
+				BREAK_TO_DEBUGGER();
+				/* Invalid I2C record */
+				return BP_RESULT_FAILURE;
+			}
+		}
+
+		/* Merge with other MXM connectors that map to the same physical
+		 * connector. */
+		for (j = i + 1;
+				j < connector_tbl->ucNumberOfObjects; j++) {
+			ATOM_OBJECT *next_object;
+			struct graphics_object_id next_object_id;
+			EXT_DISPLAY_PATH *next_ext_display_path;
+
+			next_object = &connector_tbl->asObjects[j];
+			next_object_id = object_id_from_bios_object_id(
+					le16_to_cpu(next_object->usObjectID));
+
+			if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) ||
+					(CONNECTOR_ID_MXM != next_object_id.id))
+				continue;
+
+			next_ext_display_path = get_ext_display_path_entry(
+					&ext_display_connection_info_tbl,
+					le16_to_cpu(next_object->usObjectID));
+
+			if (next_ext_display_path == NULL)
+				return BP_RESULT_FAILURE;
+
+			/* Merge if using same connector. */
+			if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
+					le16_to_cpu(ext_display_path->usDeviceConnector)) &&
+					(le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
+				/* Clear duplicate connector from table. */
+				next_object->usObjectID = cpu_to_le16(0);
+				add_device_tag_from_ext_display_path(
+						bp,
+						object,
+						ext_display_path,
+						&cached_device_support);
+			}
+		}
+	}
+
+	/* Find all encoders which have an MXM object as their destination.
+	 *  Replace the MXM object with the real connector Id from the external
+	 *  display connection info table */
+
+	encoder_table_offset = bp->object_info_tbl_offset
+			+ le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+	encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+	for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
+		uint32_t j;
+
+		object = &encoder_table->asObjects[i];
+
+		dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
+
+		for (j = 0; j < dst_number; j++) {
+			object_id = object_id_from_bios_object_id(
+					dst_object_id_list[j]);
+
+			if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+					(CONNECTOR_ID_MXM != object_id.id))
+				continue;
+
+			/* Get the correct connection info table entry based on
+			 * the enum id. */
+			ext_display_path =
+					get_ext_display_path_entry(
+							&ext_display_connection_info_tbl,
+							dst_object_id_list[j]);
+
+			if (ext_display_path == NULL)
+				return BP_RESULT_FAILURE;
+
+			dst_object_id_list[j] =
+					le16_to_cpu(ext_display_path->usDeviceConnector);
+		}
+	}
+
+	return BP_RESULT_OK;
+}
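
For reference, get_ext_display_path_entry() is defined elsewhere in this
file (outside this hunk).  A minimal sketch of what such a lookup does,
assuming the standard ATOM object-id encoding (ENUM_ID_MASK/ENUM_ID_SHIFT)
and that sPath[0] corresponds to ENUM_ID_1; names and bounds here are
illustrative:

    static EXT_DISPLAY_PATH *lookup_ext_display_path(
        ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *tbl,
        uint16_t bios_object_id)
    {
        uint32_t enum_id =
            (bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

        /* Path entries are 1-based by enum id; 0 means "no entry". */
        if (enum_id == 0 || enum_id > MAX_NUMBER_OF_EXT_DISPLAY_PATH)
            return NULL;

        return &tbl->sPath[enum_id - 1];
    }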
+
+/*
+ * Check whether we need to patch the VBIOS connector info table with
+ * data from an external display connection info table.  This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module).  With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID.  The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ */
+static void process_ext_display_connection_info(struct bios_parser *bp)
+{
+	ATOM_OBJECT_TABLE *connector_tbl;
+	uint32_t connector_tbl_offset;
+	struct graphics_object_id object_id;
+	ATOM_OBJECT *object;
+	bool mxm_connector_found = false;
+	bool null_entry_found = false;
+	uint32_t i = 0;
+
+	connector_tbl_offset = bp->object_info_tbl_offset +
+			le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+	connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+	/* Look for MXM connectors to determine whether we need patch the VBIOS
+	 * connector info table. Look for null entries to determine whether we
+	 * need to compact connector table. */
+	for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+		object = &connector_tbl->asObjects[i];
+		object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+
+		if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
+				(CONNECTOR_ID_MXM == object_id.id)) {
+			/* Once we found MXM connector - we can break */
+			mxm_connector_found = true;
+			break;
+		} else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
+			/* We need to continue looping - to check if MXM
+			 * connector present */
+			null_entry_found = true;
+		}
+	}
+
+	/* Patch BIOS image */
+	if (mxm_connector_found || null_entry_found) {
+		uint32_t connectors_num = 0;
+		uint8_t *original_bios;
+		/* Step 1: Replace bios image with the new copy which will be
+		 * patched */
+		bp->base.bios_local_image = dm_alloc(bp->base.bios_size);
+		if (bp->base.bios_local_image == NULL) {
+			BREAK_TO_DEBUGGER();
+			/* Failed to alloc bp->base.bios_local_image */
+			return;
+		}
+
+		memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
+		original_bios = bp->base.bios;
+		bp->base.bios = bp->base.bios_local_image;
+		connector_tbl =
+				GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+		/* Step 2: (only if MXM connector found) Patch BIOS image with
+		 * info from external module */
+		if (mxm_connector_found &&
+		    patch_bios_image_from_ext_display_connection_info(bp) !=
+						BP_RESULT_OK) {
+			/* Patching the bios image has failed. We will copy
+			 * again original image provided and afterwards
+			 * only remove null entries */
+			memmove(
+					bp->base.bios_local_image,
+					original_bios,
+					bp->base.bios_size);
+		}
+
+		/* Step 3: Compact connector table (remove null entries, valid
+		 * entries moved to beginning) */
+		for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+			object = &connector_tbl->asObjects[i];
+			object_id = object_id_from_bios_object_id(
+					le16_to_cpu(object->usObjectID));
+
+			if (OBJECT_TYPE_CONNECTOR != object_id.type)
+				continue;
+
+			if (i != connectors_num) {
+				memmove(
+						&connector_tbl->
+						asObjects[connectors_num],
+						object,
+						sizeof(ATOM_OBJECT));
+			}
+			++connectors_num;
+		}
+		connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
+	}
+}
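
Step 3 above is a stable in-place compaction: surviving connector entries
keep their relative order while cleared ones (usObjectID == 0, which no
longer parses as OBJECT_TYPE_CONNECTOR) are dropped.  The same idiom in
isolation, as an illustration rather than driver code:

    static uint8_t compact_ids(uint16_t *ids, uint8_t n)
    {
        uint8_t kept = 0, i;

        for (i = 0; i < n; i++) {
            if (ids[i] == 0)        /* 0 marks a cleared entry */
                continue;
            if (i != kept)
                ids[kept] = ids[i];
            kept++;
        }
        return kept;        /* {A, 0, B, 0, C} -> {A, B, C}, count 3 */
    }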
+
+static void bios_parser_post_init(struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	process_ext_display_connection_info(bp);
+}
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ *  update critical state bit in VBIOS scratch register
+ *
+ * @param
+ *  state - true to set the critical-state bit, false to clear it
+ */
+static void bios_parser_set_scratch_critical_state(
+	struct dc_bios *dcb,
+	bool state)
+{
+	bios_set_scratch_critical_state(dcb, state);
+}
+
+/*
+ * get_integrated_info_v8
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handle to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v8(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
+	uint32_t i;
+
+	info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
+			bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+	if (info_v8 == NULL)
+		return BP_RESULT_BADBIOSTABLE;
+
+	info->boot_up_engine_clock = le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
+	info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
+	info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->disp_clk_voltage[i].max_supported_clk =
+			le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
+				    ulMaximumSupportedCLK) * 10;
+		info->disp_clk_voltage[i].voltage_index =
+			le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
+	}
+
+	info->boot_up_req_display_vector =
+		le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
+	info->gpu_cap_info =
+		le32_to_cpu(info_v8->ulGPUCapInfo);
+
+	/*
+	 * system_config: Bit[0] = 0 : PCIE power gating disabled
+	 *                       = 1 : PCIE power gating enabled
+	 *                Bit[1] = 0 : DDR-PLL shut down disabled
+	 *                       = 1 : DDR-PLL shut down enabled
+	 *                Bit[2] = 0 : DDR-PLL power down disabled
+	 *                       = 1 : DDR-PLL power down enabled
+	 */
+	info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
+	info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
+	info->boot_up_nb_voltage =
+		le16_to_cpu(info_v8->usBootUpNBVoltage);
+	info->ext_disp_conn_info_offset =
+		le16_to_cpu(info_v8->usExtDispConnInfoOffset);
+	info->memory_type = info_v8->ucMemoryType;
+	info->ma_channel_number = info_v8->ucUMAChannelNumber;
+	info->gmc_restore_reset_time =
+		le32_to_cpu(info_v8->ulGMCRestoreResetTime);
+
+	info->minimum_n_clk =
+		le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
+	for (i = 1; i < 4; ++i)
+		info->minimum_n_clk =
+			info->minimum_n_clk < le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
+			info->minimum_n_clk : le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);
+
+	info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
+	info->ddr_dll_power_up_time =
+		le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
+	info->ddr_pll_power_up_time =
+		le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
+	info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
+	info->lvds_ss_percentage =
+		le16_to_cpu(info_v8->usLvdsSSPercentage);
+	info->lvds_sspread_rate_in_10hz =
+		le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
+	info->hdmi_ss_percentage =
+		le16_to_cpu(info_v8->usHDMISSPercentage);
+	info->hdmi_sspread_rate_in_10hz =
+		le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
+	info->dvi_ss_percentage =
+		le16_to_cpu(info_v8->usDVISSPercentage);
+	info->dvi_sspread_rate_in_10_hz =
+		le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);
+
+	info->max_lvds_pclk_freq_in_single_link =
+		le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
+	info->lvds_misc = info_v8->ucLvdsMisc;
+	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+		info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+		info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+		info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+		info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+		info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+		info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+	info->lvds_off_to_on_delay_in_4ms =
+		info_v8->ucLVDSOffToOnDelay_in4Ms;
+	info->lvds_bit_depth_control_val =
+		le32_to_cpu(info_v8->ulLCDBitDepthControlVal);
+
+	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->avail_s_clk[i].supported_s_clk =
+			le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+		info->avail_s_clk[i].voltage_index =
+			le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
+		info->avail_s_clk[i].voltage_id =
+			le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
+	}
+
+	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+		info->ext_disp_conn_info.gu_id[i] =
+			info_v8->sExtDispConnInfo.ucGuid[i];
+	}
+
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+		info->ext_disp_conn_info.path[i].device_connector_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+		info->ext_disp_conn_info.path[i].device_tag =
+			le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
+		info->ext_disp_conn_info.path[i].device_acpi_enum =
+			le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+			info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+			info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+		info->ext_disp_conn_info.path[i].channel_mapping.raw =
+			info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
+	}
+	info->ext_disp_conn_info.checksum =
+		info_v8->sExtDispConnInfo.ucChecksum;
+
+	return BP_RESULT_OK;
+}
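
A note on units: ATOM stores these clocks in 10 kHz units, so the "* 10"
conversions above yield kHz; e.g. ulBootUpEngineClock == 30000 decodes to
300000 kHz (300 MHz).  The spread-spectrum rates, by contrast, stay in the
table's native 10 Hz units, as the *_in_10hz field names indicate.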
+
+/*
+ * get_integrated_info_v9
+ *
+ * @brief
+ * Get V9 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handle to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v9(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
+	uint32_t i;
+
+	info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
+			bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+	if (!info_v9)
+		return BP_RESULT_BADBIOSTABLE;
+
+	info->boot_up_engine_clock = le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
+	info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
+	info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;
+
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->disp_clk_voltage[i].max_supported_clk =
+			le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
+		info->disp_clk_voltage[i].voltage_index =
+			le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
+	}
+
+	info->boot_up_req_display_vector =
+		le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
+	info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);
+
+	/*
+	 * system_config: Bit[0] = 0 : PCIE power gating disabled
+	 *                       = 1 : PCIE power gating enabled
+	 *                Bit[1] = 0 : DDR-PLL shut down disabled
+	 *                       = 1 : DDR-PLL shut down enabled
+	 *                Bit[2] = 0 : DDR-PLL power down disabled
+	 *                       = 1 : DDR-PLL power down enabled
+	 */
+	info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
+	info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
+	info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
+	info->ext_disp_conn_info_offset = le16_to_cpu(info_v9->usExtDispConnInfoOffset);
+	info->memory_type = info_v9->ucMemoryType;
+	info->ma_channel_number = info_v9->ucUMAChannelNumber;
+	info->gmc_restore_reset_time = le32_to_cpu(info_v9->ulGMCRestoreResetTime);
+
+	info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
+	for (i = 1; i < 4; ++i)
+		info->minimum_n_clk =
+			info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
+			info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);
+
+	info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
+	info->ddr_dll_power_up_time = le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
+	info->ddr_pll_power_up_time = le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
+	info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);
+	info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
+	info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
+	info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
+	info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
+	info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
+	info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
+
+	info->max_lvds_pclk_freq_in_single_link =
+		le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
+	info->lvds_misc = info_v9->ucLvdsMisc;
+	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+		info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+		info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+		info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+		info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+		info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+		info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+	info->lvds_off_to_on_delay_in_4ms =
+		info_v9->ucLVDSOffToOnDelay_in4Ms;
+	info->lvds_bit_depth_control_val =
+		le32_to_cpu(info_v9->ulLCDBitDepthControlVal);
+
+	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->avail_s_clk[i].supported_s_clk =
+			le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+		info->avail_s_clk[i].voltage_index =
+			le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
+		info->avail_s_clk[i].voltage_id =
+			le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
+	}
+
+	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+		info->ext_disp_conn_info.gu_id[i] =
+			info_v9->sExtDispConnInfo.ucGuid[i];
+	}
+
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+		info->ext_disp_conn_info.path[i].device_connector_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+		info->ext_disp_conn_info.path[i].device_tag =
+			le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
+		info->ext_disp_conn_info.path[i].device_acpi_enum =
+			le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+			info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+			info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+		info->ext_disp_conn_info.path[i].channel_mapping.raw =
+			info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
+	}
+	info->ext_disp_conn_info.checksum =
+		info_v9->sExtDispConnInfo.ucChecksum;
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handle to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision;
+
+	if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
+		header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+				bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+		get_atom_data_table_revision(header, &revision);
+
+		/* Don't need to check major revision as they are all 1 */
+		switch (revision.minor) {
+		case 8:
+			result = get_integrated_info_v8(bp, info);
+			break;
+		case 9:
+			result = get_integrated_info_v9(bp, info);
+			break;
+		default:
+			return result;
+
+		}
+	}
+
+	/* Sort voltage table from low to high */
+	if (result == BP_RESULT_OK) {
+		struct clock_voltage_caps temp = {0, 0};
+		uint32_t i;
+		uint32_t j;
+
+		for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+			for (j = i; j > 0; --j) {
+				if (
+						info->disp_clk_voltage[j].max_supported_clk <
+						info->disp_clk_voltage[j-1].max_supported_clk) {
+					/* swap j and j - 1*/
+					temp = info->disp_clk_voltage[j-1];
+					info->disp_clk_voltage[j-1] =
+							info->disp_clk_voltage[j];
+					info->disp_clk_voltage[j] = temp;
+				}
+			}
+		}
+	}
+
+	return result;
+}
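
The sort above is a plain insertion sort over the NUMBER_OF_DISP_CLK_VOLTAGE
entries, ordering by max_supported_clk ascending and carrying each entry's
voltage_index along with it; e.g. max clocks {600, 300, 800, 400} end up as
{300, 400, 600, 800}.  For a fixed table this small, the O(n^2) cost is
irrelevant.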
+
+static struct integrated_info *bios_parser_create_integrated_info(
+	struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct integrated_info *info = NULL;
+
+	info = dm_alloc(sizeof(struct integrated_info));
+
+	if (info == NULL) {
+		ASSERT_CRITICAL(0);
+		return NULL;
+	}
+
+	if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+		return info;
+
+	dm_free(info);
+
+	return NULL;
+}
+
+/******************************************************************************/
+
+static const struct dc_vbios_funcs vbios_funcs = {
+	.get_connectors_number = bios_parser_get_connectors_number,
+
+	.get_encoder_id = bios_parser_get_encoder_id,
+
+	.get_connector_id = bios_parser_get_connector_id,
+
+	.get_dst_number = bios_parser_get_dst_number,
+
+	.get_gpio_record = bios_parser_get_gpio_record,
+
+	.get_src_obj = bios_parser_get_src_obj,
+
+	.get_dst_obj = bios_parser_get_dst_obj,
+
+	.get_i2c_info = bios_parser_get_i2c_info,
+
+	.get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+	.get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+	.get_hpd_info = bios_parser_get_hpd_info,
+
+	.get_device_tag = bios_parser_get_device_tag,
+
+	.get_firmware_info = bios_parser_get_firmware_info,
+
+	.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+	.get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+	.get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+	.get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+	.get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+	/* bios scratch register communication */
+	.is_accelerated_mode = bios_is_accelerated_mode,
+
+	.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+	.is_device_id_supported = bios_parser_is_device_id_supported,
+
+	/* COMMANDS */
+	.encoder_control = bios_parser_encoder_control,
+
+	.transmitter_control = bios_parser_transmitter_control,
+
+	.crt_control = bios_parser_crt_control,  /* not used in DAL3.  keep for now in case we need to support VGA on Bonaire */
+
+	.enable_crtc = bios_parser_enable_crtc,
+
+	.adjust_pixel_clock = bios_parser_adjust_pixel_clock,
+
+	.set_pixel_clock = bios_parser_set_pixel_clock,
+
+	.set_dce_clock = bios_parser_set_dce_clock,
+
+	.enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,
+
+	.program_crtc_timing = bios_parser_program_crtc_timing, /* still use.  should probably retire and program directly */
+
+	.crtc_source_select = bios_parser_crtc_source_select,  /* still use.  should probably retire and program directly */
+
+	.program_display_engine_pll = bios_parser_program_display_engine_pll,
+
+	.enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+	/* SW init and patch */
+	.post_init = bios_parser_post_init,  /* patch vbios table for mxm module by reading i2c */
+
+	.bios_parser_destroy = bios_parser_destroy,
+};
+
+static bool bios_parser_construct(
+	struct bios_parser *bp,
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	uint16_t *rom_header_offset = NULL;
+	ATOM_ROM_HEADER *rom_header = NULL;
+	ATOM_OBJECT_HEADER *object_info_tbl;
+	struct atom_data_revision tbl_rev = {0};
+
+	if (!init)
+		return false;
+
+	if (!init->bios)
+		return false;
+
+	bp->base.funcs = &vbios_funcs;
+	bp->base.bios = init->bios;
+	bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;
+
+	bp->base.ctx = init->ctx;
+	bp->base.bios_local_image = NULL;
+
+	rom_header_offset =
+	GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
+
+	if (!rom_header_offset)
+		return false;
+
+	rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);
+
+	if (!rom_header)
+		return false;
+
+	get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
+	if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
+		return false;
+
+	bp->master_data_tbl =
+	GET_IMAGE(ATOM_MASTER_DATA_TABLE,
+		rom_header->usMasterDataTableOffset);
+
+	if (!bp->master_data_tbl)
+		return false;
+
+	bp->object_info_tbl_offset = DATA_TABLES(Object_Header);
+
+	if (!bp->object_info_tbl_offset)
+		return false;
+
+	object_info_tbl =
+	GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);
+
+	if (!object_info_tbl)
+		return false;
+
+	get_atom_data_table_revision(&object_info_tbl->sHeader,
+		&bp->object_info_tbl.revision);
+
+	if (bp->object_info_tbl.revision.major == 1
+		&& bp->object_info_tbl.revision.minor >= 3) {
+		ATOM_OBJECT_HEADER_V3 *tbl_v3;
+
+		tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
+			bp->object_info_tbl_offset);
+		if (!tbl_v3)
+			return false;
+
+		bp->object_info_tbl.v1_3 = tbl_v3;
+	} else if (bp->object_info_tbl.revision.major == 1
+		&& bp->object_info_tbl.revision.minor >= 1)
+		bp->object_info_tbl.v1_1 = object_info_tbl;
+	else
+		return false;
+
+	dal_bios_parser_init_cmd_tbl(bp);
+	dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
+
+	bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+	return true;
+}
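
A note on the size computation near the top: assuming the usual definitions
(BIOS_IMAGE_SIZE_OFFSET == 2, BIOS_IMAGE_SIZE_UNIT == 512), bios_size is
taken from the PCI option-ROM header, whose byte at offset 2 gives the image
length in 512-byte blocks; a value of 0x80 there, for example, means a
64 KiB image.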
+
+/******************************************************************************/

+ 33 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_H__
+#define __DAL_BIOS_PARSER_H__
+
+struct dc_bios *bios_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version);
+
+#endif

+ 82 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c

@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "command_table.h"
+#include "bios_parser_types_internal.h"
+
+uint8_t *get_image(struct dc_bios *bp,
+	uint32_t offset,
+	uint32_t size)
+{
+	if (bp->bios && offset + size < bp->bios_size)
+		return bp->bios + offset;
+	else
+		return NULL;
+}
+
+#include "reg_helper.h"
+
+#define CTX \
+	bios->ctx
+#define REG(reg)\
+	(bios->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+		ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name
+
+bool bios_is_accelerated_mode(
+	struct dc_bios *bios)
+{
+	uint32_t acc_mode;
+
+	REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode);
+	return (acc_mode == 1);
+}
+
+void bios_set_scratch_acc_mode_change(
+	struct dc_bios *bios)
+{
+	REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1);
+}
+
+void bios_set_scratch_critical_state(
+	struct dc_bios *bios,
+	bool state)
+{
+	uint32_t critical_state = state ? 1 : 0;
+
+	REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critical_state);
+}

+ 40 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h

@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_HELPER_H__
+#define __DAL_BIOS_PARSER_HELPER_H__
+
+struct bios_parser;
+
+uint8_t *get_image(struct dc_bios *bp, uint32_t offset,
+	uint32_t size);
+
+bool bios_is_accelerated_mode(struct dc_bios *bios);
+void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
+void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
+
+#define GET_IMAGE(type, offset) ((type *) get_image(&bp->base, offset, sizeof(type)))
+
+#endif
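
GET_IMAGE() is a typed, bounds-checked view into the raw BIOS image; a NULL
return means the requested record would extend past bios_size.  Typical
usage, mirroring bios_parser_construct():

    ATOM_ROM_HEADER *rom_header =
        GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);

    if (!rom_header)
        return false;   /* offset out of range or truncated image */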

+ 50 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c

@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+
+#include "bios_parser_interface.h"
+#include "bios_parser.h"
+
+
+struct dc_bios *dal_bios_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	struct dc_bios *bios = NULL;
+
+	bios = bios_parser_create(init, dce_version);
+
+	return bios;
+}
+
+void dal_bios_parser_destroy(struct dc_bios **dcb)
+{
+	struct dc_bios *bios = *dcb;
+
+	bios->funcs->bios_parser_destroy(dcb);
+}
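
A usage sketch for the create/destroy pair above.  The caller-side names
(dc_ctx, vbios_image) are hypothetical; the bp_init_data fields match what
bios_parser_construct() consumes, and DCE_VERSION_11_0 stands in for
whatever DCE version the ASIC reports:

    struct bp_init_data init = {
        .ctx = dc_ctx,          /* hypothetical dc context */
        .bios = vbios_image,    /* pointer to the raw VBIOS image */
    };
    struct dc_bios *dcb = dal_bios_parser_create(&init, DCE_VERSION_11_0);

    if (dcb) {
        /* ... call through dcb->funcs ... */
        dal_bios_parser_destroy(&dcb);
    }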
+

+ 72 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h

@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+struct atom_data_revision {
+	uint32_t major;
+	uint32_t minor;
+};
+
+struct object_info_table {
+	struct atom_data_revision revision;
+	union {
+		ATOM_OBJECT_HEADER *v1_1;
+		ATOM_OBJECT_HEADER_V3 *v1_3;
+	};
+};
+
+enum spread_spectrum_id {
+	SS_ID_UNKNOWN = 0,
+	SS_ID_DP1 = 0xf1,
+	SS_ID_DP2 = 0xf2,
+	SS_ID_LVLINK_2700MHZ = 0xf3,
+	SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+	struct dc_bios base;
+
+	struct object_info_table object_info_tbl;
+	uint32_t object_info_tbl_offset;
+	ATOM_MASTER_DATA_TABLE *master_data_tbl;
+
+	const struct bios_parser_helper *bios_helper;
+
+	const struct command_table_helper *cmd_helper;
+	struct cmd_tbl cmd_tbl;
+
+	bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+	container_of(dc_bios, struct bios_parser, base)
+
+#endif

+ 2609 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table.c

@@ -0,0 +1,2609 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table.h"
+#include "command_table_helper.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal.h"
+
+#define EXEC_BIOS_CMD_TABLE(command, params)\
+	(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+		GetIndexIntoMasterTable(COMMAND, command), \
+		&params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
+	cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+		GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(command)\
+	bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+		GetIndexIntoMasterTable(COMMAND, command))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
+static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_output_control(struct bios_parser *bp);
+static void init_blank_crtc(struct bios_parser *bp);
+static void init_set_crtc_timing(struct bios_parser *bp);
+static void init_set_crtc_overscan(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+static void init_enable_crtc_mem_req(struct bios_parser *bp);
+static void init_compute_memore_engine_pll(struct bios_parser *bp);
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_program_clock(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+	init_dig_encoder_control(bp);
+	init_transmitter_control(bp);
+	init_set_pixel_clock(bp);
+	init_enable_spread_spectrum_on_ppll(bp);
+	init_adjust_display_pll(bp);
+	init_dac_encoder_control(bp);
+	init_dac_output_control(bp);
+	init_blank_crtc(bp);
+	init_set_crtc_timing(bp);
+	init_set_crtc_overscan(bp);
+	init_select_crtc_source(bp);
+	init_enable_crtc(bp);
+	init_enable_crtc_mem_req(bp);
+	init_program_clock(bp);
+	init_compute_memore_engine_pll(bp);
+	init_external_encoder_control(bp);
+	init_enable_disp_power_gating(bp);
+	init_set_dce_clock(bp);
+}
+
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+					     uint32_t index)
+{
+	uint8_t frev, crev;
+
+	if (cgs_atom_get_cmd_table_revs(cgs_device,
+					index,
+					&frev, &crev) != 0)
+		return 0;
+	return crev;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  D I G E N C O D E R C O N T R O L
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result encoder_control_digx_v3(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v4(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+static enum bp_result encoder_control_digx_v5(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+#endif
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+	uint32_t version =
+		BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl);
+
+	switch (version) {
+	case 2:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3;
+		break;
+	case 4:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4;
+		break;
+
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+	case 5:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5;
+		break;
+#endif
+
+	default:
+		init_encoder_control_dig_v1(bp);
+		break;
+	}
+}
+
+static enum bp_result encoder_control_dig_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig1_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig2_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp)
+{
+	struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+	if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl))
+		cmd_tbl->encoder_control_dig1 = encoder_control_dig1_v1;
+	else
+		cmd_tbl->encoder_control_dig1 = NULL;
+
+	if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl))
+		cmd_tbl->encoder_control_dig2 = encoder_control_dig2_v1;
+	else
+		cmd_tbl->encoder_control_dig2 = NULL;
+
+	cmd_tbl->dig_encoder_control = encoder_control_dig_v1;
+}
+
+static enum bp_result encoder_control_dig_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+	if (cntl != NULL)
+		switch (cntl->engine_id) {
+		case ENGINE_ID_DIGA:
+			if (cmd_tbl->encoder_control_dig1 != NULL)
+				result =
+					cmd_tbl->encoder_control_dig1(bp, cntl);
+			break;
+		case ENGINE_ID_DIGB:
+			if (cmd_tbl->encoder_control_dig2 != NULL)
+				result =
+					cmd_tbl->encoder_control_dig2(bp, cntl);
+			break;
+
+		default:
+			break;
+		}
+
+	return result;
+}
+
+static enum bp_result encoder_control_dig1_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+	bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+	if (EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result encoder_control_dig2_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+	bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+	if (EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result encoder_control_digx_v3(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 params = {0};
+
+	if (LANE_COUNT_FOUR < cntl->lanes_number)
+		params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+	else
+		params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+	params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+	/* We need to convert from KHz units into 10KHz units */
+	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+	params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+	params.ucEncoderMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio);
+	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result encoder_control_digx_v4(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 params = {0};
+
+	if (LANE_COUNT_FOUR < cntl->lanes_number)
+		params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+	else
+		params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+	params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+	/* We need to convert from KHz units into 10KHz units */
+	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+	params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+	params.ucEncoderMode =
+			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio));
+	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+static enum bp_result encoder_control_digx_v5(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0};
+
+	params.ucDigId = (uint8_t)(cntl->engine_id);
+	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+	params.ulPixelClock = cntl->pixel_clock / 10;
+	params.ucDigMode =
+			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio));
+	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+	switch (cntl->color_depth) {
+	case COLOR_DEPTH_888:
+		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_101010:
+		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_121212:
+		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_161616:
+		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+		break;
+	default:
+		break;
+	}
+
+	if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		switch (cntl->color_depth) {
+		case COLOR_DEPTH_101010:
+			params.ulPixelClock =
+				(params.ulPixelClock * 30) / 24;
+			break;
+		case COLOR_DEPTH_121212:
+			params.ulPixelClock =
+				(params.ulPixelClock * 36) / 24;
+			break;
+		case COLOR_DEPTH_161616:
+			params.ulPixelClock =
+				(params.ulPixelClock * 48) / 24;
+			break;
+		default:
+			break;
+		}
+
+	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+#endif
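
The deep-color rescaling above is easy to check by hand: for a 148.5 MHz
HDMI stream (1080p60), ulPixelClock starts as 148500 / 10 = 14850 in 10 kHz
units; at 12 bits per color it becomes 14850 * 36 / 24 = 22275, i.e. a
222.75 MHz TMDS clock, matching the 1.5x bandwidth that 12 bpc requires.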
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  TRANSMITTER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result transmitter_control_v2(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v3(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v4(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_5(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_6(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+
+static void init_transmitter_control(struct bios_parser *bp)
+{
+	uint8_t frev;
+	uint8_t crev;
+
+	if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
+			frev, crev) != 0)
+		BREAK_TO_DEBUGGER();
+	switch (crev) {
+	case 2:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v2;
+		break;
+	case 3:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v3;
+		break;
+	case 4:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v4;
+		break;
+	case 5:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
+		break;
+	case 6:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+		break;
+	default:
+		bp->cmd_tbl.transmitter_control = NULL;
+		break;
+	}
+}
+
+static enum bp_result transmitter_control_v2(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+
+	memset(&params, 0, sizeof(params));
+
+	switch (cntl->transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+	case TRANSMITTER_TRAVIS_LCD:
+		break;
+	default:
+		return BP_RESULT_BADINPUT;
+	}
+
+	switch (cntl->action) {
+	case TRANSMITTER_CONTROL_INIT:
+		if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
+				(CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
+			/* on INIT this bit should be set according to the
+			 * physical connector
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* connector object id */
+		params.usInitInfo =
+				cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
+		break;
+	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+		params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+		params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+		break;
+	default:
+		/* if dual-link */
+		if (LANE_COUNT_FOUR < cntl->lanes_number) {
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 20KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+		} else
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 10KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		break;
+	}
+
+	/* 00 - coherent mode
+	 * 01 - incoherent mode
+	 */
+
+	params.acConfig.fCoherentMode = cntl->coherent;
+
+	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+			|| (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+			|| (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+		/* Bit2: Transmitter Link selection
+		 * =0 when bit0=0, single link A/C/E, when bit0=1,
+		 * master link A/C/E
+		 * =1 when bit0=0, single link B/D/F, when bit0=1,
+		 * master link B/D/F
+		 */
+		params.acConfig.ucLinkSel = 1;
+
+	if (ENGINE_ID_DIGB == cntl->engine_id)
+		/* Bit3: Transmitter data source selection
+		 * =0 DIGA is data source.
+		 * =1 DIGB is data source.
+		 * This bit is only useful when ucAction= ATOM_ENABLE
+		 */
+		params.acConfig.ucEncoderSel = 1;
+
+	if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+		/* Bit4: DP connector flag
+		 * =0 connector is none-DP connector
+		 * =1 connector is DP connector
+		 */
+		params.acConfig.fDPConnector = 1;
+
+	/* Bit[7:6]: Transmitter selection
+	 * =0 UNIPHY_ENCODER: UNIPHYA/B
+	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
+	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
+	 * =3 reserved
+	 */
+	params.acConfig.ucTransmitterSel =
+			(uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
+					cntl->transmitter);
+
+	params.ucAction = (uint8_t)cntl->action;
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
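
The unit trick here is worth spelling out: ATOM expects usPixelClock in
10 kHz units, and in dual-link mode each link carries half the pixel rate,
so dividing kHz by 20 folds the halving into the unit conversion.  For a
269.5 MHz dual-link DVI mode, 269500 / 20 = 13475, versus 269500 / 10 =
26950 had it been single link.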
+
+static enum bp_result transmitter_control_v3(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
+	uint32_t pll_id;
+	enum connector_id conn_id =
+			dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
+					|| (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);
+
+	memset(&params, 0, sizeof(params));
+
+	switch (cntl->transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+	case TRANSMITTER_TRAVIS_LCD:
+		break;
+	default:
+		return BP_RESULT_BADINPUT;
+	}
+
+	if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
+		return BP_RESULT_BADINPUT;
+
+	/* fill information based on the action */
+	switch (cntl->action) {
+	case TRANSMITTER_CONTROL_INIT:
+		if (dual_link_conn) {
+			/* on INIT this bit should be set according to the
+			 * physical connector
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+		}
+
+		/* connector object id */
+		params.usInitInfo =
+				cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+		break;
+	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+		params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+		params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+		break;
+	default:
+		if (dual_link_conn && cntl->multi_path)
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* if dual-link */
+		if (LANE_COUNT_FOUR < cntl->lanes_number) {
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 20KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+		} else {
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 10KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		}
+		break;
+	}
+
+	/* 00 - coherent mode
+	 * 01 - incoherent mode
+	 */
+
+	params.acConfig.fCoherentMode = cntl->coherent;
+
+	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+		/* Bit2: Transmitter Link selection
+		 * =0 when bit0=0, single link A/C/E, when bit0=1,
+		 * master link A/C/E
+		 * =1 when bit0=0, single link B/D/F, when bit0=1,
+		 * master link B/D/F
+		 */
+		params.acConfig.ucLinkSel = 1;
+
+	if (ENGINE_ID_DIGB == cntl->engine_id)
+		/* Bit3: Transmitter data source selection
+		 * =0 DIGA is data source.
+		 * =1 DIGB is data source.
+		 * This bit is only useful when ucAction= ATOM_ENABLE
+		 */
+		params.acConfig.ucEncoderSel = 1;
+
+	/* Bit[7:6]: Transmitter selection
+	 * =0 UNIPHY_ENCODER: UNIPHYA/B
+	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
+	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
+	 * =3 reserved
+	 */
+	params.acConfig.ucTransmitterSel =
+			(uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);
+
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+	params.acConfig.ucRefClkSource = (uint8_t)pll_id;
+
+	params.ucAction = (uint8_t)cntl->action;
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result transmitter_control_v4(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
+	uint32_t ref_clk_src_id;
+	enum connector_id conn_id =
+			dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+	const struct command_table_helper *cmd = bp->cmd_helper;
+
+	memset(&params, 0, sizeof(params));
+
+	switch (cntl->transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+	case TRANSMITTER_TRAVIS_LCD:
+		break;
+	default:
+		return BP_RESULT_BADINPUT;
+	}
+
+	if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
+		return BP_RESULT_BADINPUT;
+
+	switch (cntl->action) {
+	case TRANSMITTER_CONTROL_INIT:
+		if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+				(CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+			/* on INIT this bit should be set according to the
+			 * physical connector
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* connector object id */
+		params.usInitInfo =
+				cpu_to_le16((uint16_t)(cntl->connector_obj_id.id));
+		break;
+	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+		params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
+		params.asMode.ucLaneSet = (uint8_t)(cntl->lane_settings);
+		break;
+	default:
+		if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+				(CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* if dual-link */
+		if (LANE_COUNT_FOUR < cntl->lanes_number) {
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 20KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+		} else {
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 10KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		}
+		break;
+	}
+
+	/* 00 - coherent mode
+	 * 01 - incoherent mode
+	 */
+
+	params.acConfig.fCoherentMode = cntl->coherent;
+
+	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+		/* Bit2: Transmitter Link selection
+		 * =0 when bit0=0, single link A/C/E, when bit0=1,
+		 * master link A/C/E
+		 * =1 when bit0=0, single link B/D/F, when bit0=1,
+		 * master link B/D/F
+		 */
+		params.acConfig.ucLinkSel = 1;
+
+	if (ENGINE_ID_DIGB == cntl->engine_id)
+		/* Bit3: Transmitter data source selection
+		 * =0 DIGA is data source.
+		 * =1 DIGB is data source.
+		 * This bit is only useful when ucAction= ATOM_ENABLE
+		 */
+		params.acConfig.ucEncoderSel = 1;
+
+	/* Bit[7:6]: Transmitter selection
+	 * =0 UNIPHY_ENCODER: UNIPHYA/B
+	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
+	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
+	 * =3 reserved
+	 */
+	params.acConfig.ucTransmitterSel =
+		(uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));
+	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+	params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);
+	params.ucAction = (uint8_t)(cntl->action);
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result transmitter_control_v1_5(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;
+
+	memset(&params, 0, sizeof(params));
+	params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+	params.ucAction = (uint8_t)cntl->action;
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+	params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+
+	params.ucDigMode =
+		cmd->signal_type_to_atom_dig_mode(cntl->signal);
+	params.asConfig.ucPhyClkSrcId =
+		cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
+	/* 00 - coherent mode */
+	params.asConfig.ucCoherentMode = cntl->coherent;
+	params.asConfig.ucHPDSel =
+		cmd->hpd_sel_to_atom(cntl->hpd_sel);
+	params.ucDigEncoderSel =
+		cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+	params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
+	params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
+	/*
+	 * In SI/TN case, caller has to set usPixelClock as follows:
+	 * DP mode: usPixelClock = DP_LINK_CLOCK/10
+	 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+	 * DVI single link mode: usPixelClock = pixel clock
+	 * DVI dual link mode: usPixelClock = pixel clock
+	 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+	 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+	 * LVDS mode: usPixelClock = pixel clock
+	 */
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result transmitter_control_v1_6(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params;
+
+	memset(&params, 0, sizeof(params));
+	params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+	params.ucAction = (uint8_t)cntl->action;
+
+	if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+		params.ucDPLaneSet = (uint8_t)cntl->lane_settings;
+	else
+		params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+	params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+	params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+	params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+	params.ulSymClock = cpu_to_le32(cntl->pixel_clock / 10);
+
+	/*
+	 * In SI/TN case, caller has to set usPixelClock as follows:
+	 * DP mode: usPixelClock = DP_LINK_CLOCK/10
+	 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+	 * DVI single link mode: usPixelClock = pixel clock
+	 * DVI dual link mode: usPixelClock = pixel clock
+	 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+	 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+	 * LVDS mode: usPixelClock = pixel clock
+	 */
+	switch (cntl->signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		switch (cntl->color_depth) {
+		case COLOR_DEPTH_101010:
+			params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 30) / 24);
+			break;
+		case COLOR_DEPTH_121212:
+			params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 36) / 24);
+			break;
+		case COLOR_DEPTH_161616:
+			params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 48) / 24);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
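+	/* Illustrative example: a 148,500 kHz HDMI pixel clock at 12 bpc
+	 * (COLOR_DEPTH_121212) becomes ulSymClock = 14850 * 36 / 24 = 22275,
+	 * i.e. a 222.75 MHz TMDS symbol clock in 10 kHz units.
+	 */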
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+#endif
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET PIXEL CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_pixel_clock_v3(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v7(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+	case 3:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3;
+		break;
+	case 5:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5;
+		break;
+	case 6:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6;
+		break;
+	case 7:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+		break;
+	default:
+		bp->cmd_tbl.set_pixel_clock = NULL;
+		break;
+	}
+}
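+
+/* Dispatch sketch: BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock) reads the
+ * parameter-table revision that this VBIOS image reports for its
+ * SetPixelClock entry; a board reporting revision 7 thus ends up calling
+ * set_pixel_clock_v7 through bp->cmd_tbl.set_pixel_clock, and callers must
+ * treat a NULL pointer (unknown revision) as unsupported.
+ */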
+
+static enum bp_result set_pixel_clock_v3(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	PIXEL_CLOCK_PARAMETERS_V3 *params;
+	SET_PIXEL_CLOCK_PS_ALLOCATION allocation;
+
+	memset(&allocation, 0, sizeof(allocation));
+
+	if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id)
+		allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
+	else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
+		allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
+	else
+		return BP_RESULT_BADINPUT;
+
+	allocation.sPCLKInput.usRefDiv =
+			cpu_to_le16((uint16_t)bp_params->reference_divider);
+	allocation.sPCLKInput.usFbDiv =
+			cpu_to_le16((uint16_t)bp_params->feedback_divider);
+	allocation.sPCLKInput.ucFracFbDiv =
+			(uint8_t)bp_params->fractional_feedback_divider;
+	allocation.sPCLKInput.ucPostDiv =
+			(uint8_t)bp_params->pixel_clock_post_divider;
+
+	/* We need to convert from KHz units into 10KHz units */
+	allocation.sPCLKInput.usPixelClock =
+			cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+	params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
+	params->ucTransmitterId =
+			bp->cmd_helper->encoder_id_to_atom(
+					dal_graphics_object_id_get_encoder_id(
+							bp_params->encoder_object_id));
+	params->ucEncoderMode =
+			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+					bp_params->signal_type, false));
+
+	if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+		params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+	if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
+		params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;
+
+	if (CONTROLLER_ID_D1 != bp_params->controller_id)
+		params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+
+	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
+		result = BP_RESULT_OK;
+
+	return result;
+}
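+
+/* Illustrative v3 example (assumed numbers): a 74,250 kHz request is sent
+ * as usPixelClock = 7425 (10 kHz units); the reference/feedback/post
+ * dividers are passed through unchanged from the caller's PLL calculation.
+ */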
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
+	PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
+	/* Caller doesn't need to init this portion */
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
+#endif
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
+	PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
+	/* Caller doesn't need to init this portion */
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
+#endif
+
+static enum bp_result set_pixel_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(
+					bp_params->controller_id, &controller_id)) {
+		clk.sPCLKInput.ucCRTC = controller_id;
+		clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
+		clk.sPCLKInput.ucRefDiv =
+				(uint8_t)(bp_params->reference_divider);
+		clk.sPCLKInput.usFbDiv =
+				cpu_to_le16((uint16_t)(bp_params->feedback_divider));
+		clk.sPCLKInput.ulFbDivDecFrac =
+				cpu_to_le32(bp_params->fractional_feedback_divider);
+		clk.sPCLKInput.ucPostDiv =
+				(uint8_t)(bp_params->pixel_clock_post_divider);
+		clk.sPCLKInput.ucTransmitterID =
+				bp->cmd_helper->encoder_id_to_atom(
+						dal_graphics_object_id_get_encoder_id(
+								bp_params->encoder_object_id));
+		clk.sPCLKInput.ucEncoderMode =
+				(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+						bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 10KHz units */
+		clk.sPCLKInput.usPixelClock =
+				cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+		/* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
+		 * =1:30bpp, =2:32bpp
+		 * the driver chooses to program it itself, i.e. here we
+		 * program it to 888 by default.
+		 */
+
+		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+			result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+static enum bp_result set_pixel_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(
+					bp_params->controller_id, &controller_id)) {
+		/* Note: VBIOS still wants to use the ucCRTC name, which is now
+		 * 1 byte in a ULONG:
+		 *
+		 * typedef struct _CRTC_PIXEL_CLOCK_FREQ {
+		 *	// target pixel clock to drive the CRTC timing;
+		 *	// 0 means disable PPLL/DCPLL. Expanded to 24 bits
+		 *	// compared to the previous version.
+		 *	ULONG ulPixelClock:24;
+		 *	// ATOM_CRTC1~6, indicates the CRTC controller to
+		 *	// drive the pixel clock; not used for the DCPLL case.
+		 *	ULONG ucCRTC:8;
+		 * } CRTC_PIXEL_CLOCK_FREQ;
+		 *
+		 * union {
+		 *	// pixel clock and CRTC id frequency
+		 *	CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+		 *	ULONG ulDispEngClkFreq;	// dispclk frequency
+		 * };
+		 */
+		clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
+		clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
+		clk.sPCLKInput.ucRefDiv =
+				(uint8_t) bp_params->reference_divider;
+		clk.sPCLKInput.usFbDiv =
+				cpu_to_le16((uint16_t) bp_params->feedback_divider);
+		clk.sPCLKInput.ulFbDivDecFrac =
+				cpu_to_le32(bp_params->fractional_feedback_divider);
+		clk.sPCLKInput.ucPostDiv =
+				(uint8_t) bp_params->pixel_clock_post_divider;
+		clk.sPCLKInput.ucTransmitterID =
+				bp->cmd_helper->encoder_id_to_atom(
+						dal_graphics_object_id_get_encoder_id(
+								bp_params->encoder_object_id));
+		clk.sPCLKInput.ucEncoderMode =
+				(uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
+						bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 10KHz units */
+		clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
+				cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
+		}
+
+		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+		}
+
+		/* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
+		 * 24bpp =1:30bpp, =2:32bpp
+		 * the driver chooses to program it itself, i.e. here we pass
+		 * the required target rate that includes deep color.
+		 */
+
+		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+			result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+static enum bp_result set_pixel_clock_v7(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+	PIXEL_CLOCK_PARAMETERS_V7 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
+		/* Note: VBIOS still wants to use the ucCRTC name, which is now
+		 * 1 byte in a ULONG:
+		 *
+		 * typedef struct _CRTC_PIXEL_CLOCK_FREQ {
+		 *	// target pixel clock to drive the CRTC timing;
+		 *	// 0 means disable PPLL/DCPLL. Expanded to 24 bits
+		 *	// compared to the previous version.
+		 *	ULONG ulPixelClock:24;
+		 *	// ATOM_CRTC1~6, indicates the CRTC controller to
+		 *	// drive the pixel clock; not used for the DCPLL case.
+		 *	ULONG ucCRTC:8;
+		 * } CRTC_PIXEL_CLOCK_FREQ;
+		 *
+		 * union {
+		 *	// pixel clock and CRTC id frequency
+		 *	CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+		 *	ULONG ulDispEngClkFreq;	// dispclk frequency
+		 * };
+		 */
+		clk.ucCRTC = controller_id;
+		clk.ucPpll = (uint8_t) pll_id;
+		clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
+		clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 100Hz units */
+		clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
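+		/* Illustrative example: a 594,000 kHz HDMI 2.0 clock becomes
+		 * ulPixelClock = 5940000 (100 Hz units), which still fits in
+		 * the 24-bit ulPixelClock field (max ~1.677 GHz).
+		 */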
+
+		clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
+
+		if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+		if (bp_params->flags.SUPPORT_YUV_420)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+		if (bp_params->flags.SET_XTALIN_REF_SRC)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+		if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+		if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+			result = BP_RESULT_OK;
+	}
+#endif
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE PIXEL CLOCK SS
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable);
+
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)) {
+	case 1:
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+				enable_spread_spectrum_on_ppll_v1;
+		break;
+	case 2:
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+				enable_spread_spectrum_on_ppll_v2;
+		break;
+	case 3:
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+				enable_spread_spectrum_on_ppll_v3;
+		break;
+	default:
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
+		break;
+	}
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL params;
+
+	memset(&params, 0, sizeof(params));
+
+	if (enable && bp_params->percentage > 0)
+		params.ucEnable = ATOM_ENABLE;
+	else
+		params.ucEnable = ATOM_DISABLE;
+
+	params.usSpreadSpectrumPercentage =
+			cpu_to_le16((uint16_t)bp_params->percentage);
+	params.ucSpreadSpectrumStep =
+			(uint8_t)bp_params->ver1.step;
+	params.ucSpreadSpectrumDelay =
+			(uint8_t)bp_params->ver1.delay;
+	/* convert back to unit of 10KHz */
+	params.ucSpreadSpectrumRange =
+			(uint8_t)(bp_params->ver1.range / 10000);
+
+	if (bp_params->flags.EXTERNAL_SS)
+		params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK;
+
+	if (bp_params->flags.CENTER_SPREAD)
+		params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE;
+
+	if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+		params.ucPpll = ATOM_PPLL1;
+	else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+		params.ucPpll = ATOM_PPLL2;
+	else
+		BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+	if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params;
+
+	memset(&params, 0, sizeof(params));
+
+	if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL;
+	else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL;
+	else
+		BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+	if (enable && bp_params->percentage > 0) {
+		params.ucEnable = ATOM_ENABLE;
+
+		params.usSpreadSpectrumPercentage =
+				cpu_to_le16((uint16_t)(bp_params->percentage));
+		params.usSpreadSpectrumStep =
+				cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+		if (bp_params->flags.EXTERNAL_SS)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD;
+
+		if (bp_params->flags.CENTER_SPREAD)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD;
+
+		/* Both amounts need to be left shifted first before bit
+		 * comparison. Otherwise, the result will always be zero here
+		 */
+		params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+				((bp_params->ds.feedback_amount <<
+						ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+						ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+						((bp_params->ds.nfrac_amount <<
+								ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+								ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)));
+	} else {
+		params.ucEnable = ATOM_DISABLE;
+	}
+
+	if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params;
+
+	memset(&params, 0, sizeof(params));
+
+	switch (bp_params->pll_id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		/* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only,
+		 * not for SI display clock.
+		 */
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL;
+		break;
+
+	case CLOCK_SOURCE_ID_PLL2:
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL;
+		break;
+
+	case CLOCK_SOURCE_ID_DCPLL:
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+		break;
+
+	default:
+		BREAK_TO_DEBUGGER();
+		/* Unexpected PLL value!! */
+		return result;
+	}
+
+	if (enable) {
+		params.ucEnable = ATOM_ENABLE;
+
+		params.usSpreadSpectrumAmountFrac =
+				cpu_to_le16((uint16_t)(bp_params->ds_frac_amount));
+		params.usSpreadSpectrumStep =
+				cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+		if (bp_params->flags.EXTERNAL_SS)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD;
+		if (bp_params->flags.CENTER_SPREAD)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD;
+
+		/* Both amounts need to be left shifted first before bit
+		 * comparison. Otherwise, the result will always be zero here
+		 */
+		params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+				((bp_params->ds.feedback_amount <<
+						ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) &
+						ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) |
+						((bp_params->ds.nfrac_amount <<
+								ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) &
+								ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)));
+	} else {
+		params.ucEnable = ATOM_DISABLE;
+	}
+
+	if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ADJUST DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result adjust_display_pll_v2(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params);
+static enum bp_result adjust_display_pll_v3(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params);
+
+static void init_adjust_display_pll(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)) {
+	case 2:
+		bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2;
+		break;
+	case 3:
+		bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
+		break;
+	default:
+		bp->cmd_tbl.adjust_display_pll = NULL;
+		break;
+	}
+}
+
+static enum bp_result adjust_display_pll_v2(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 };
+
+	/* We need to convert from KHz units into 10KHz units and then convert
+	 * output pixel clock back 10KHz-->KHz */
+	uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10;
+
+	params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in));
+	params.ucTransmitterID =
+			bp->cmd_helper->encoder_id_to_atom(
+					dal_graphics_object_id_get_encoder_id(
+							bp_params->encoder_object_id));
+	params.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					bp_params->signal_type, false);
+
+	if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+		/* Convert output pixel clock back 10KHz-->KHz: multiply
+		 * original pixel clock in KHz by ratio
+		 * [output pxlClk/input pxlClk]; usPixelClock is in/out on
+		 * this table revision (mirrors the v3 path below).
+		 */
+		uint64_t pixel_clk_10_khz_out =
+				(uint64_t)le16_to_cpu(params.usPixelClock);
+		uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+		if (pixel_clock_10KHz_in != 0) {
+			bp_params->adjusted_pixel_clock =
+					div_u64(pixel_clk * pixel_clk_10_khz_out,
+							pixel_clock_10KHz_in);
+		} else {
+			bp_params->adjusted_pixel_clock = 0;
+			BREAK_TO_DEBUGGER();
+		}
+
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+static enum bp_result adjust_display_pll_v3(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params;
+	uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10;
+
+	memset(&params, 0, sizeof(params));
+
+	/* We need to convert from KHz units into 10KHz units and then convert
+	 * output pixel clock back 10KHz-->KHz */
+	params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in);
+	params.sInput.ucTransmitterID =
+			bp->cmd_helper->encoder_id_to_atom(
+					dal_graphics_object_id_get_encoder_id(
+							bp_params->encoder_object_id));
+	params.sInput.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					bp_params->signal_type, false);
+
+	if (bp_params->ss_enable == true)
+		params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE;
+
+	if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+		params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK;
+
+	if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+		/* Convert output pixel clock back 10KHz-->KHz: multiply
+		 * original pixel clock in KHz by ratio
+		 * [output pxlClk/input pxlClk] */
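+		/* e.g. (hypothetical numbers) a 165,000 kHz request answered
+		 * with ulDispPllFreq = 16650 gives
+		 * 165000 * 16650 / 16500 = 166,500 kHz adjusted clock.
+		 */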
+		uint64_t pixel_clk_10_khz_out =
+				(uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq);
+		uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+		if (pixel_clk_10_kHz_in != 0) {
+			bp_params->adjusted_pixel_clock =
+					div_u64(pixel_clk * pixel_clk_10_khz_out,
+							pixel_clk_10_kHz_in);
+		} else {
+			bp_params->adjusted_pixel_clock = 0;
+			BREAK_TO_DEBUGGER();
+		}
+
+		bp_params->reference_divider = params.sOutput.ucRefDiv;
+		bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv;
+
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  DAC ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac1_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard);
+static enum bp_result dac2_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard);
+
+static void init_dac_encoder_control(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl)) {
+	case 1:
+		bp->cmd_tbl.dac1_encoder_control = dac1_encoder_control_v1;
+		break;
+	default:
+		bp->cmd_tbl.dac1_encoder_control = NULL;
+		break;
+	}
+	switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl)) {
+	case 1:
+		bp->cmd_tbl.dac2_encoder_control = dac2_encoder_control_v1;
+		break;
+	default:
+		bp->cmd_tbl.dac2_encoder_control = NULL;
+		break;
+	}
+}
+
+static void dac_encoder_control_prepare_params(
+	DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard)
+{
+	params->ucDacStandard = dac_standard;
+	if (enable)
+		params->ucAction = ATOM_ENABLE;
+	else
+		params->ucAction = ATOM_DISABLE;
+
+	/* We need to convert from KHz units into 10KHz units
+	 * it looks as if TvControl does not care about the pixel clock
+	 */
+	params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10));
+}
+
+static enum bp_result dac1_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+	dac_encoder_control_prepare_params(
+		&params,
+		enable,
+		pixel_clock,
+		dac_standard);
+
+	if (EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result dac2_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+	dac_encoder_control_prepare_params(
+		&params,
+		enable,
+		pixel_clock,
+		dac_standard);
+
+	if (EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  DAC OUTPUT CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result dac1_output_control_v1(
+	struct bios_parser *bp,
+	bool enable);
+static enum bp_result dac2_output_control_v1(
+	struct bios_parser *bp,
+	bool enable);
+
+static void init_dac_output_control(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl)) {
+	case 1:
+		bp->cmd_tbl.dac1_output_control = dac1_output_control_v1;
+		break;
+	default:
+		bp->cmd_tbl.dac1_output_control = NULL;
+		break;
+	}
+	switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl)) {
+	case 1:
+		bp->cmd_tbl.dac2_output_control = dac2_output_control_v1;
+		break;
+	default:
+		bp->cmd_tbl.dac2_output_control = NULL;
+		break;
+	}
+}
+
+static enum bp_result dac1_output_control_v1(
+	struct bios_parser *bp, bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+	memset(&params, 0, sizeof(params));
+
+	if (enable)
+		params.ucAction = ATOM_ENABLE;
+	else
+		params.ucAction = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result dac2_output_control_v1(
+	struct bios_parser *bp, bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+	memset(&params, 0, sizeof(params));
+
+	if (enable)
+		params.ucAction = ATOM_ENABLE;
+	else
+		params.ucAction = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                 BLANK CRTC
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result blank_crtc_v1(
+	struct bios_parser *bp,
+	struct bp_blank_crtc_parameters *bp_params,
+	bool blank);
+
+static void init_blank_crtc(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(BlankCRTC)) {
+	case 1:
+		bp->cmd_tbl.blank_crtc = blank_crtc_v1;
+		break;
+	default:
+		bp->cmd_tbl.blank_crtc = NULL;
+		break;
+	}
+}
+
+static enum bp_result blank_crtc_v1(
+	struct bios_parser *bp,
+	struct bp_blank_crtc_parameters *bp_params,
+	bool blank)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	BLANK_CRTC_PARAMETERS params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+			&atom_controller_id)) {
+		params.ucCRTC = (uint8_t)atom_controller_id;
+
+		if (blank)
+			params.ucBlanking = ATOM_BLANKING;
+		else
+			params.ucBlanking = ATOM_BLANKING_OFF;
+		params.usBlackColorRCr =
+				cpu_to_le16((uint16_t)bp_params->black_color_rcr);
+		params.usBlackColorGY =
+				cpu_to_le16((uint16_t)bp_params->black_color_gy);
+		params.usBlackColorBCb =
+				cpu_to_le16((uint16_t)bp_params->black_color_bcb);
+
+		if (EXEC_BIOS_CMD_TABLE(BlankCRTC, params))
+			result = BP_RESULT_OK;
+	} else {
+		/* No support for more than two CRTCs on current ASICs; update
+		 * this if needed.
+		 */
+		result = BP_RESULT_BADINPUT;
+	}
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET CRTC TIMING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params);
+static enum bp_result set_crtc_timing_v1(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+	uint32_t dtd_version =
+			BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming);
+	if (dtd_version > 2)
+		switch (dtd_version) {
+		case 3:
+			bp->cmd_tbl.set_crtc_timing =
+					set_crtc_using_dtd_timing_v3;
+			break;
+		default:
+			bp->cmd_tbl.set_crtc_timing = NULL;
+			break;
+		}
+	else
+		switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)) {
+		case 1:
+			bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
+			break;
+		default:
+			bp->cmd_tbl.set_crtc_timing = NULL;
+			break;
+		}
+}
+
+static enum bp_result set_crtc_timing_v1(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+
+	params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total));
+	params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable));
+	params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start));
+	params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width));
+	params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total));
+	params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable));
+	params.usV_SyncStart =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_start));
+	params.usV_SyncWidth =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_width));
+
+	/* VBIOS does not expect any value except zero in this call; for
+	 * underscan, use the separate ProgramOverscan call instead. Mode
+	 * 1776x1000 with overscan 72x44 (i.e. 1920x1080) @30 is OK in DAL2,
+	 * but the same mode at 60 Hz shows corruption;
+	 * DAL1 does not allow the mode 1776x1000@60.
+	 */
+	params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right;
+	params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left;
+	params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom;
+	params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top;
+
+	if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+	if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+	if (bp_params->flags.INTERLACE) {
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+		/* original DAL code has this condition to apply this for
+		 * non-TV/CV only, due to complex MV testing for possible
+		 * impact
+		 * if (pACParameters->signal != SignalType_YPbPr &&
+		 *  pACParameters->signal != SignalType_Composite &&
+		 *  pACParameters->signal != SignalType_SVideo)
+		 */
+		/* HW will deduct 0.5 line from the 2nd field,
+		 * i.e. for 1080i it is 2 lines for the 1st field and 2.5
+		 * lines for the 2nd field. We need the input as 5 instead
+		 * of 4, but it is 4 both from EDID data
+		 * (spec CEA 861) and the CEA timing table.
+		 */
+		params.usV_SyncStart =
+				cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1));
+	}
+
+	if (bp_params->flags.HORZ_COUNT_BY_TWO)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+	if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+
+	/* bios usH_Size wants h addressable size */
+	params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+	/* bios usH_Blanking_Time wants borders included in blanking */
+	params.usH_Blanking_Time =
+			cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable));
+	/* bios usV_Size wants v addressable size */
+	params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+	/* bios usV_Blanking_Time wants borders included in blanking */
+	params.usV_Blanking_Time =
+			cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable));
+	/* bios usHSyncOffset is the offset from the end of h addressable,
+	 * our horizontalSyncStart is the offset from the beginning
+	 * of h addressable */
+	params.usH_SyncOffset =
+			cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable));
+	params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+	/* bios usV_SyncOffset is the offset from the end of v addressable,
+	 * our verticalSyncStart is the offset from the beginning of
+	 * v addressable */
+	params.usV_SyncOffset =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable));
+	params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
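+
+	/* Illustrative example, CEA 1920x1080@60 (assumed input values):
+	 * h_total 2200, h_addressable 1920 -> usH_Blanking_Time = 280;
+	 * h_sync_start 2008 -> usH_SyncOffset = 88, usH_SyncWidth = 44;
+	 * v_total 1125, v_addressable 1080 -> usV_Blanking_Time = 45;
+	 * v_sync_start 1084 -> usV_SyncOffset = 4, usV_SyncWidth = 5.
+	 */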
+
+	/* we assume that overscan from original timing does not get bigger
+	 * than 255
+	 * we will program all the borders in the Set CRTC Overscan call below
+	 */
+
+	if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+	if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+	if (bp_params->flags.INTERLACE)	{
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+		/* original DAL code has this condition to apply this
+		 * for non-TV/CV only
+		 * due to complex MV testing for possible impact
+		 * if ( pACParameters->signal != SignalType_YPbPr &&
+		 *  pACParameters->signal != SignalType_Composite &&
+		 *  pACParameters->signal != SignalType_SVideo)
+		 */
+		{
+			/* HW will deduct 0.5 line from the 2nd field,
+			 * i.e. for 1080i it is 2 lines for the 1st field and
+			 * 2.5 lines for the 2nd field. We need the input as 5
+			 * instead of 4,
+			 * but it is 4 both from EDID data (spec CEA 861)
+			 * and the CEA timing table.
+			 */
+			params.usV_SyncOffset =
+					cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
+		}
+	}
+
+	if (bp_params->flags.HORZ_COUNT_BY_TWO)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+	if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET CRTC OVERSCAN
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_crtc_overscan_v1(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_overscan_parameters *bp_params);
+
+static void init_set_crtc_overscan(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_OverScan))	{
+	case 1:
+		bp->cmd_tbl.set_crtc_overscan = set_crtc_overscan_v1;
+		break;
+	default:
+		bp->cmd_tbl.set_crtc_overscan = NULL;
+		break;
+	}
+}
+
+static enum bp_result set_crtc_overscan_v1(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_overscan_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_CRTC_OVERSCAN_PARAMETERS params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	params.usOverscanRight =
+			cpu_to_le16((uint16_t)bp_params->h_overscan_right);
+	params.usOverscanLeft =
+			cpu_to_le16((uint16_t)bp_params->h_overscan_left);
+	params.usOverscanBottom =
+			cpu_to_le16((uint16_t)bp_params->v_overscan_bottom);
+	params.usOverscanTop =
+			cpu_to_le16((uint16_t)bp_params->v_overscan_top);
+
+	if (EXEC_BIOS_CMD_TABLE(SetCRTC_OverScan, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v2(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+	case 2:
+		bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+		break;
+	case 3:
+		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+		break;
+	default:
+		bp->cmd_tbl.select_crtc_source = NULL;
+		break;
+	}
+}
+
+static enum bp_result select_crtc_source_v2(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
+	uint8_t atom_controller_id;
+	uint32_t atom_engine_id;
+	enum signal_type s = bp_params->signal;
+
+	memset(&params, 0, sizeof(params));
+
+	/* set controller id */
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+	else
+		return BP_RESULT_FAILURE;
+
+	/* set encoder id */
+	if (bp->cmd_helper->engine_bp_to_atom(
+			bp_params->engine_id, &atom_engine_id))
+		params.ucEncoderID = (uint8_t)atom_engine_id;
+	else
+		return BP_RESULT_FAILURE;
+
+	if (SIGNAL_TYPE_EDP == s ||
+			(SIGNAL_TYPE_DISPLAY_PORT == s &&
+					SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+		s = SIGNAL_TYPE_LVDS;
+
+	params.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					s, bp_params->enable_dp_audio);
+
+	if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+	uint8_t atom_controller_id;
+	uint32_t atom_engine_id;
+	enum signal_type s = bp_params->signal;
+
+	memset(&params, 0, sizeof(params));
+
+	if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+			&atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+	else
+		return result;
+
+	if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+			&atom_engine_id))
+		params.ucEncoderID = (uint8_t)atom_engine_id;
+	else
+		return result;
+
+	if (SIGNAL_TYPE_EDP == s ||
+			(SIGNAL_TYPE_DISPLAY_PORT == s &&
+					SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+		s = SIGNAL_TYPE_LVDS;
+
+	params.ucEncodeMode =
+			bp->cmd_helper->encoder_mode_bp_to_atom(
+					s, bp_params->enable_dp_audio);
+	/* Needed for VBIOS Random Spatial Dithering feature */
+	params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+	if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE CRTC
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) {
+	case 1:
+		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+		break;
+	default:
+		bp->cmd_tbl.enable_crtc = NULL;
+		break;
+	}
+}
+
+static enum bp_result enable_crtc_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_CRTC_PARAMETERS params = {0};
+	uint8_t id;
+
+	if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+		params.ucCRTC = id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	if (enable)
+		params.ucEnable = ATOM_ENABLE;
+	else
+		params.ucEnable = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE CRTC MEM REQ
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_mem_req_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable);
+
+static void init_enable_crtc_mem_req(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) {
+	case 1:
+		bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1;
+		break;
+	default:
+		bp->cmd_tbl.enable_crtc_mem_req = NULL;
+		break;
+	}
+}
+
+static enum bp_result enable_crtc_mem_req_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_BADINPUT;
+	ENABLE_CRTC_PARAMETERS params = {0};
+	uint8_t id;
+
+	if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) {
+		params.ucCRTC = id;
+
+		if (enable)
+			params.ucEnable = ATOM_ENABLE;
+		else
+			params.ucEnable = ATOM_DISABLE;
+
+		if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params))
+			result = BP_RESULT_OK;
+		else
+			result = BP_RESULT_FAILURE;
+	}
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result program_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result program_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
+static void init_program_clock(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+	case 5:
+		bp->cmd_tbl.program_clock = program_clock_v5;
+		break;
+	case 6:
+		bp->cmd_tbl.program_clock = program_clock_v6;
+		break;
+	default:
+		bp->cmd_tbl.program_clock = NULL;
+		break;
+	}
+}
+
+static enum bp_result program_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params;
+	uint32_t atom_pll_id;
+
+	memset(&params, 0, sizeof(params));
+	if (!bp->cmd_helper->clock_source_id_to_atom(
+			bp_params->pll_id, &atom_pll_id)) {
+		BREAK_TO_DEBUGGER(); /* Invalid Input! */
+		return BP_RESULT_BADINPUT;
+	}
+
+	/* We need to convert from KHz units into 10KHz units */
+	params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
+	params.sPCLKInput.usPixelClock =
+			cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10));
+	params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
+
+	if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+		params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+static enum bp_result program_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params;
+	uint32_t atom_pll_id;
+
+	memset(&params, 0, sizeof(params));
+
+	if (!bp->cmd_helper->clock_source_id_to_atom(
+			bp_params->pll_id, &atom_pll_id)) {
+		BREAK_TO_DEBUGGER(); /* Invalid Input! */
+		return BP_RESULT_BADINPUT;
+	}
+
+	/* We need to convert from KHz units into 10KHz units */
+	params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
+	params.sPCLKInput.ulDispEngClkFreq =
+			cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+	if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+		params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
+		/* True display clock is returned by VBIOS if DFS bypass
+		 * is enabled. */
+		bp_params->dfs_bypass_display_clock =
+				(uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10);
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
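+
+/* Sketch of the v6 round trip (assumed numbers): a 600,000 kHz request is
+ * sent as ulDispEngClkFreq = 60000 (10 kHz units); when DFS bypass is
+ * enabled the VBIOS overwrites that field with the true display clock,
+ * which is converted back to kHz by multiplying by 10.
+ */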
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                 COMPUTE MEMORY ENGINE PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result compute_memore_engine_pll_v4(
+	struct bios_parser *bp,
+	struct bp_display_clock_parameters *bp_params);
+
+static void init_compute_memore_engine_pll(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(ComputeMemoryEnginePLL)) {
+	case 4:
+		bp->cmd_tbl.compute_memore_engine_pll =
+				compute_memore_engine_pll_v4;
+		break;
+	default:
+		bp->cmd_tbl.compute_memore_engine_pll = NULL;
+		break;
+	}
+}
+
+static enum bp_result compute_memore_engine_pll_v4(
+	struct bios_parser *bp,
+	struct bp_display_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 params;
+
+	memset(&params, 0, sizeof(params));
+
+	params.ulClock = cpu_to_le32(bp_params->target_display_clock / 10);
+
+	/* Initialize this to the target clock in case this call fails */
+	bp_params->actual_display_clock = bp_params->target_display_clock;
+
+	if (EXEC_BIOS_CMD_TABLE(ComputeMemoryEnginePLL, params)) {
+		/* Convert from 10KHz units back to KHz */
+		bp_params->actual_display_clock =
+				le32_to_cpu(params.ulClock) * 10;
+		bp_params->actual_post_divider_id = params.ucPostDiv;
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
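+
+/* Illustrative example: a 400,000 kHz display clock request is sent as
+ * ulClock = 40000; on success the VBIOS may return a slightly different
+ * achievable clock, converted back to kHz by multiplying by 10.
+ */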
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  EXTERNAL ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+	struct bios_parser *bp,
+	struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+	struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) {
+	case 3:
+		bp->cmd_tbl.external_encoder_control =
+				external_encoder_control_v3;
+		break;
+	default:
+		bp->cmd_tbl.external_encoder_control = NULL;
+		break;
+	}
+}
+
+static enum bp_result external_encoder_control_v3(
+	struct bios_parser *bp,
+	struct bp_external_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	/* we need to use the _PS_Alloc struct */
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params;
+	EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params;
+	struct graphics_object_id encoder;
+	bool is_input_signal_dp = false;
+
+	memset(&params, 0, sizeof(params));
+
+	cntl_params = &params.sExtEncoder;
+
+	encoder = cntl->encoder_id;
+
+	/* check if encoder supports external encoder control table */
+	switch (dal_graphics_object_id_get_encoder_id(encoder)) {
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		is_input_signal_dp = true;
+		break;
+
+	default:
+		BREAK_TO_DEBUGGER();
+		return BP_RESULT_BADINPUT;
+	}
+
+	/* Fill information based on the action
+	 *
+	 * Bit[6:4]: indicate external encoder, applied to all functions.
+	 * =0: external encoder1, mapped to external encoder enum id1
+	 * =1: external encoder2, mapped to external encoder enum id2
+	 *
+	 * enum ObjectEnumId
+	 * {
+	 *  EnumId_Unknown = 0,
+	 *  EnumId_1,
+	 *  EnumId_2,
+	 * };
+	 */
+	cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4);
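+	/* e.g. encoder.enum_id == EnumId_2 gives ucConfig = (2 - 1) << 4 = 0x10 */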
+
+	switch (cntl->action) {
+	case EXTERNAL_ENCODER_CONTROL_INIT:
+		/* output display connector type. Only valid in encoder
+		 * initialization */
+		cntl_params->usConnectorId =
+				cpu_to_le16((uint16_t)cntl->connector_obj_id.id);
+		break;
+	case EXTERNAL_ENCODER_CONTROL_SETUP:
+		/* EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 pixel clock unit in
+		 * 10KHz
+		 * output display device pixel clock frequency in unit of 10KHz.
+		 * Only valid in setup and enableoutput
+		 */
+		cntl_params->usPixelClock =
+				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		/* Indicate the display output signal type driven by the
+		 * external encoder, only valid in setup and enableoutput */
+		cntl_params->ucEncoderMode =
+				(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+						cntl->signal, false);
+
+		if (is_input_signal_dp) {
+			/* Bit[0]: indicate link rate, =1: 2.7GHz, =0: 1.62GHz,
+			 * only valid in encoder setup with DP mode. */
+			if (LINK_RATE_HIGH == cntl->link_rate)
+				cntl_params->ucConfig |= 1;
+			/* output color depth: indicates the encoder data bpc
+			 * format in DP mode, only valid in encoder setup in
+			 * DP mode.
+			 */
+			cntl_params->ucBitPerColor =
+					(uint8_t)(cntl->color_depth);
+		}
+		/* Indicate how many lanes are used by the external encoder,
+		 * only valid in encoder setup and enableoutput. */
+		cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number);
+		break;
+	case EXTERNAL_ENCODER_CONTROL_ENABLE:
+		cntl_params->usPixelClock =
+				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		cntl_params->ucEncoderMode =
+				(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+						cntl->signal, false);
+		cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number;
+		break;
+	default:
+		break;
+	}
+
+	cntl_params->ucAction = (uint8_t)cntl->action;
+
+	if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
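
For reference, the ucConfig byte assembled above packs the external encoder enum ID into Bit[6:4] and, for DP input signals, the link rate into Bit[0]. A minimal standalone sketch of that packing with illustrative values (the helper name below is hypothetical, not part of this patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: pack the external encoder config byte the way
 * external_encoder_control_v3() does above. Bit[6:4] selects the encoder
 * (enum_id - 1); Bit[0] selects the DP link rate.
 */
static uint8_t pack_ext_encoder_config(uint32_t enum_id, bool high_link_rate)
{
	uint8_t config = (uint8_t)((enum_id - 1) << 4);

	if (high_link_rate)
		config |= 1; /* 1 = 2.7 GHz, 0 = 1.62 GHz */

	return config;
}

int main(void)
{
	/* Second external encoder (enum_id == 2) at the 2.7 GHz link rate */
	assert(pack_ext_encoder_config(2, true) == 0x11);
	return 0;
}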
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE DISPLAY POWER GATING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+	struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) {
+	case 1:
+		bp->cmd_tbl.enable_disp_power_gating =
+				enable_disp_power_gating_v2_1;
+		break;
+	default:
+		bp->cmd_tbl.enable_disp_power_gating = NULL;
+		break;
+	}
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0};
+	uint8_t atom_crtc_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+		params.ucDispPipeId = atom_crtc_id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	params.ucEnable =
+			bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+	if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET DCE CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+static enum bp_result set_dce_clock_v2_1(
+	struct bios_parser *bp,
+	struct bp_set_dce_clock_parameters *bp_params);
+#endif
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
+	case 1:
+		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+		break;
+	default:
+		bp->cmd_tbl.set_dce_clock = NULL;
+		break;
+	}
+#endif
+}
+
+#ifdef LATEST_ATOM_BIOS_SUPPORT
+static enum bp_result set_dce_clock_v2_1(
+	struct bios_parser *bp,
+	struct bp_set_dce_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
+	uint32_t atom_pll_id;
+	uint32_t atom_clock_type;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+
+	memset(&params, 0, sizeof(params));
+
+	if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+			!cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
+		return BP_RESULT_BADINPUT;
+
+	params.asParam.ucDCEClkSrc  = atom_pll_id;
+	params.asParam.ucDCEClkType = atom_clock_type;
+
+	if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+		if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+		if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+		if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+		if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+	} else {
+		/* Only program the clock frequency if the display clock is
+		 * used; VBIOS will program DPREFCLK.
+		 * We need to convert from KHz units into 10KHz units.
+		 */
+		params.asParam.ulDCEClkFreq = cpu_to_le32(bp_params->target_clock_frequency / 10);
+	}
+
+	if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
+		/* Convert from 10KHz units back to KHz */
+		bp_params->target_clock_frequency = le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+#endif
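
The DCE clock frequency round-trips through the command table in 10KHz units: divided by 10 on the way in, multiplied by 10 on the way back out, so any sub-10KHz precision in the request is lost. A quick worked example (values illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t target_khz = 600000;          /* request 600 MHz, in KHz */
	uint32_t atom_units = target_khz / 10; /* 60000, in 10KHz units */

	/* What set_dce_clock_v2_1() would report back after the call */
	assert(atom_units * 10 == 600000);
	return 0;
}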

+ 112 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table.h

@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_H__
+#define __DAL_COMMAND_TABLE_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+struct cmd_tbl {
+	enum bp_result (*dig_encoder_control)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*encoder_control_dig1)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*encoder_control_dig2)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*transmitter_control)(
+		struct bios_parser *bp,
+		struct bp_transmitter_control *control);
+	enum bp_result (*set_pixel_clock)(
+		struct bios_parser *bp,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*enable_spread_spectrum_on_ppll)(
+		struct bios_parser *bp,
+		struct bp_spread_spectrum_parameters *bp_params,
+		bool enable);
+	enum bp_result (*adjust_display_pll)(
+		struct bios_parser *bp,
+		struct bp_adjust_pixel_clock_parameters *bp_params);
+	enum bp_result (*dac1_encoder_control)(
+		struct bios_parser *bp,
+		bool enable,
+		uint32_t pixel_clock,
+		uint8_t dac_standard);
+	enum bp_result (*dac2_encoder_control)(
+		struct bios_parser *bp,
+		bool enable,
+		uint32_t pixel_clock,
+		uint8_t dac_standard);
+	enum bp_result (*dac1_output_control)(
+		struct bios_parser *bp,
+		bool enable);
+	enum bp_result (*dac2_output_control)(
+		struct bios_parser *bp,
+		bool enable);
+	enum bp_result (*blank_crtc)(
+		struct bios_parser *bp,
+		struct bp_blank_crtc_parameters *bp_params,
+		bool blank);
+	enum bp_result (*set_crtc_timing)(
+		struct bios_parser *bp,
+		struct bp_hw_crtc_timing_parameters *bp_params);
+	enum bp_result (*set_crtc_overscan)(
+		struct bios_parser *bp,
+		struct bp_hw_crtc_overscan_parameters *bp_params);
+	enum bp_result (*select_crtc_source)(
+		struct bios_parser *bp,
+		struct bp_crtc_source_select *bp_params);
+	enum bp_result (*enable_crtc)(
+		struct bios_parser *bp,
+		enum controller_id controller_id,
+		bool enable);
+	enum bp_result (*enable_crtc_mem_req)(
+		struct bios_parser *bp,
+		enum controller_id controller_id,
+		bool enable);
+	enum bp_result (*program_clock)(
+		struct bios_parser *bp,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*compute_memory_engine_pll)(
+		struct bios_parser *bp,
+		struct bp_display_clock_parameters *bp_params);
+	enum bp_result (*external_encoder_control)(
+			struct bios_parser *bp,
+			struct bp_external_encoder_control *cntl);
+	enum bp_result (*enable_disp_power_gating)(
+		struct bios_parser *bp,
+		enum controller_id crtc_id,
+		enum bp_pipe_control_action action);
+	enum bp_result (*set_dce_clock)(
+		struct bios_parser *bp,
+		struct bp_set_dce_clock_parameters *bp_params);
+};
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
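
Because each init_* routine leaves a table entry NULL when the VBIOS table revision is unrecognized, callers are expected to test the function pointer before dispatching. A sketch of that pattern (the wrapper below is hypothetical, and it assumes the full struct bios_parser definition embedding struct cmd_tbl is visible and that BP_RESULT_UNSUPPORTED is part of enum bp_result):

/* Hypothetical caller-side wrapper around the function-pointer table. */
static enum bp_result bp_power_gate_pipe(
	struct bios_parser *bp,
	enum controller_id crtc_id,
	enum bp_pipe_control_action action)
{
	if (bp->cmd_tbl.enable_disp_power_gating == NULL)
		return BP_RESULT_UNSUPPORTED; /* table revision not handled */

	return bp->cmd_tbl.enable_disp_power_gating(bp, crtc_id, action);
}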

+ 288 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c

@@ -0,0 +1,288 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper.h"
+
+bool dal_bios_parser_init_cmd_tbl_helper(
+	const struct command_table_helper **h,
+	enum dce_version dce)
+{
+	switch (dce) {
+	case DCE_VERSION_8_0:
+		*h = dal_cmd_tbl_helper_dce80_get_table();
+		return true;
+
+	case DCE_VERSION_10_0:
+		*h = dal_cmd_tbl_helper_dce110_get_table();
+		return true;
+
+	case DCE_VERSION_11_0:
+		*h = dal_cmd_tbl_helper_dce110_get_table();
+		return true;
+
+	case DCE_VERSION_11_2:
+		*h = dal_cmd_tbl_helper_dce112_get_table();
+		return true;
+
+	default:
+		/* Unsupported DCE */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
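
Note that DCE 10.0 intentionally reuses the DCE 11.0 helper table; only DCE 11.2 gets a distinct one. A hedged sketch of init-time usage (the wrapper name is illustrative; struct bios_parser is assumed to hold a cmd_helper pointer, as the command-table code above dereferences bp->cmd_helper):

/* Illustrative init-time binding: resolve the per-DCE helper table once,
 * then populate the command table that dispatches through it.
 */
static bool bios_parser_bind_tables(struct bios_parser *bp,
				    enum dce_version dce)
{
	if (!dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce))
		return false; /* unsupported DCE generation */

	dal_bios_parser_init_cmd_tbl(bp);
	return true;
}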
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+	enum controller_id id,
+	uint8_t *atom_id)
+{
+	if (atom_id == NULL) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	switch (id) {
+	case CONTROLLER_ID_D0:
+		*atom_id = ATOM_CRTC1;
+		return true;
+	case CONTROLLER_ID_D1:
+		*atom_id = ATOM_CRTC2;
+		return true;
+	case CONTROLLER_ID_D2:
+		*atom_id = ATOM_CRTC3;
+		return true;
+	case CONTROLLER_ID_D3:
+		*atom_id = ATOM_CRTC4;
+		return true;
+	case CONTROLLER_ID_D4:
+		*atom_id = ATOM_CRTC5;
+		return true;
+	case CONTROLLER_ID_D5:
+		*atom_id = ATOM_CRTC6;
+		return true;
+	case CONTROLLER_ID_UNDERLAY0:
+		*atom_id = ATOM_UNDERLAY_PIPE0;
+		return true;
+	case CONTROLLER_ID_UNDEFINED:
+		*atom_id = ATOM_CRTC_INVALID;
+		return true;
+	default:
+		/* Wrong controller id */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+/**
+ * dal_cmd_table_helper_transmitter_bp_to_atom
+ *
+ * @brief
+ *  Translate the transmitter to the corresponding ATOM BIOS value
+ *
+ * @param
+ *  input: transmitter
+ *  output: digitalTransmitter
+ *    // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ *    // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ *    // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+	enum transmitter t)
+{
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_TRAVIS_LCD:
+		return 0;
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+		return 1;
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+		return 2;
+	default:
+		/* Invalid Transmitter Type! */
+		BREAK_TO_DEBUGGER();
+		return 0;
+	}
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+	enum signal_type s,
+	bool enable_dp_audio)
+{
+	switch (s) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		return ATOM_ENCODER_MODE_DVI;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		return ATOM_ENCODER_MODE_HDMI;
+	case SIGNAL_TYPE_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_VIRTUAL:
+		if (enable_dp_audio)
+			return ATOM_ENCODER_MODE_DP_AUDIO;
+		else
+			return ATOM_ENCODER_MODE_DP;
+	case SIGNAL_TYPE_RGB:
+		return ATOM_ENCODER_MODE_CRT;
+	default:
+		return ATOM_ENCODER_MODE_CRT;
+	}
+}
+
+void dal_cmd_table_helper_assign_control_parameter(
+	const struct command_table_helper *h,
+	struct bp_encoder_control *control,
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
+{
+	/* There are three transmitter blocks, each with two 4-lane links:
+	 * A+B, C+D and E+F. UNIPHY A, C and E are enumerated as link 0 in
+	 * each transmitter block; B, D and F as link 1. The third transmitter
+	 * block has non-splittable links (UNIPHY E and UNIPHY F cannot be
+	 * configured separately to drive two different streams).
+	 */
+	if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
+		(control->transmitter == TRANSMITTER_UNIPHY_D) ||
+		(control->transmitter == TRANSMITTER_UNIPHY_F)) {
+		/* Bit2: Link Select
+		 * =0: PHY linkA/C/E
+		 * =1: PHY linkB/D/F
+		 */
+		ctrl_param->acConfig.ucLinkSel = 1;
+	}
+
+	/* Bit[4:3]: Transmitter Selection
+	 * =00: Digital Transmitter1 ( UNIPHY linkAB )
+	 * =01: Digital Transmitter2 ( UNIPHY linkCD )
+	 * =02: Digital Transmitter3 ( UNIPHY linkEF )
+	 * =03: Reserved
+	 */
+	ctrl_param->acConfig.ucTransmitterSel =
+		(uint8_t)(h->transmitter_bp_to_atom(control->transmitter));
+
+	ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
+	/* We need to convert from KHz units into 10KHz units */
+	ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
+	ctrl_param->ucEncoderMode =
+		(uint8_t)(h->encoder_mode_bp_to_atom(
+			control->signal, control->enable_dp_audio));
+	ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
+}
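
Working through the comment above for TRANSMITTER_UNIPHY_D: D is link 1 of the C+D pair, and transmitter_bp_to_atom() maps C/D to digital transmitter 2 (value 1). Assuming the two bitfields land at Bit2 and Bit[4:3] as documented, the packed config works out as follows (illustrative re-derivation, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t link_sel = 1;        /* UNIPHY D: link 1 of the C+D block */
	uint8_t transmitter_sel = 1; /* C+D pair: digital transmitter 2 */

	/* Assumed layout: Bit2 = link select, Bit[4:3] = transmitter select */
	uint8_t config = (uint8_t)((transmitter_sel << 3) | (link_sel << 2));

	assert(config == 0x0c);
	return 0;
}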
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+	enum clock_source_id id,
+	uint32_t *ref_clk_src_id)
+{
+	if (ref_clk_src_id == NULL) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL1:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+		return true;
+	case CLOCK_SOURCE_ID_PLL2:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+		return true;
+	case CLOCK_SOURCE_ID_DCPLL:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+		return true;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+		return true;
+	case CLOCK_SOURCE_ID_UNDEFINED:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+		return true;
+	default:
+		/* Unsupported clock source id */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+	enum encoder_id id)
+{
+	switch (id) {
+	case ENCODER_ID_INTERNAL_LVDS:
+		return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+	case ENCODER_ID_INTERNAL_TMDS1:
+		return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+	case ENCODER_ID_INTERNAL_TMDS2:
+		return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+	case ENCODER_ID_INTERNAL_DAC1:
+		return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+	case ENCODER_ID_INTERNAL_DAC2:
+		return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+	case ENCODER_ID_INTERNAL_LVTM1:
+		return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+	case ENCODER_ID_INTERNAL_HDMI:
+		return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		return ENCODER_OBJECT_ID_TRAVIS;
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+		return ENCODER_OBJECT_ID_NUTMEG;
+	case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+	case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+	case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+	case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+		return ENCODER_OBJECT_ID_MVPU_FPGA;
+	case ENCODER_ID_INTERNAL_DDI:
+		return ENCODER_OBJECT_ID_INTERNAL_DDI;
+	case ENCODER_ID_INTERNAL_UNIPHY:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+	case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+	case ENCODER_ID_INTERNAL_UNIPHY1:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+	case ENCODER_ID_INTERNAL_UNIPHY2:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+	case ENCODER_ID_INTERNAL_UNIPHY3:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+	case ENCODER_ID_INTERNAL_WIRELESS:
+		return ENCODER_OBJECT_ID_INTERNAL_VCE;
+	case ENCODER_ID_UNKNOWN:
+		return ENCODER_OBJECT_ID_NONE;
+	default:
+		/* Invalid encoder id */
+		BREAK_TO_DEBUGGER();
+		return ENCODER_OBJECT_ID_NONE;
+	}
+}

+ 90 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h

@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_H__
+#define __DAL_COMMAND_TABLE_HELPER_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper_dce112.h"
+
+struct command_table_helper {
+	bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+	uint8_t (*encoder_action_to_atom)(
+			enum bp_encoder_control_action action);
+	uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+			bool enable_dp_audio);
+	bool (*engine_bp_to_atom)(enum engine_id engine_id,
+			uint32_t *atom_engine_id);
+	void (*assign_control_parameter)(
+			const struct command_table_helper *h,
+			struct bp_encoder_control *control,
+			DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+	bool (*clock_source_id_to_atom)(enum clock_source_id id,
+			uint32_t *atom_pll_id);
+	bool (*clock_source_id_to_ref_clk_src)(
+			enum clock_source_id id,
+			uint32_t *ref_clk_src_id);
+	uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+	uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+	uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+			enum clock_source_id id);
+	uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+	uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+	uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+	uint8_t (*phy_id_to_atom)(enum transmitter t);
+	uint8_t (*disp_power_gating_action_to_atom)(
+			enum bp_pipe_control_action action);
+	bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+			uint32_t *atom_clock_type);
+	uint8_t (*transmitter_color_depth_to_atom)(
+			enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
+	enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+	enum controller_id id,
+	uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+	enum signal_type s,
+	bool enable_dp_audio);
+
+void dal_cmd_table_helper_assign_control_parameter(
+	const struct command_table_helper *h,
+	struct bp_encoder_control *control,
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+	enum clock_source_id id,
+	uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+	enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+	enum encoder_id id);
+#endif

+ 364 - 0
drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c

@@ -0,0 +1,364 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+		break;
+	case SIGNAL_TYPE_LVDS:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel >> 4;
+}
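
The trailing shifts in the two helpers above (>> 2 for the PHY clock source, >> 4 for the HPD select) are there because the ATOM_TRANSMITTER_CONFIG_V5_* constants appear to be pre-shifted register mask values, while the callers want raw field values they can place into their own bitfields. A sketch under that assumption (the define below is illustrative, not the real ATOM value):

#include <assert.h>
#include <stdint.h>

/* Assumption: the V5 refclk-source field occupies bits [3:2], so the
 * pre-shifted constant for P2PLL would be (2 << 2).
 */
#define EXAMPLE_CONFIG_V5_P2PLL (2 << 2)

int main(void)
{
	uint8_t field = EXAMPLE_CONFIG_V5_P2PLL >> 2; /* what the helper returns */

	assert(field == 2);
	return 0;
}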
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+	uint8_t atom_dig_encoder_sel = 0;
+
+	switch (id) {
+	case ENGINE_ID_DIGA:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	case ENGINE_ID_DIGB:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+		break;
+	case ENGINE_ID_DIGC:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+		break;
+	case ENGINE_ID_DIGD:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+		break;
+	case ENGINE_ID_DIGE:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+		break;
+	case ENGINE_ID_DIGF:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+		break;
+	case ENGINE_ID_DIGG:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+		break;
+	case ENGINE_ID_UNKNOWN:
+		/* No DIG_FRONT is associated with DIG_BACKEND */
+		atom_dig_encoder_sel = 0;
+		break;
+	default:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	}
+
+	return atom_dig_encoder_sel;
+}
+
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_ID_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_PLL1:
+			*atom_pll_id = ATOM_PPLL1;
+			break;
+		case CLOCK_SOURCE_ID_PLL2:
+			*atom_pll_id = ATOM_PPLL2;
+			break;
+		case CLOCK_SOURCE_ID_EXTERNAL:
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_EXT_PLL1;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			/* for VCE encoding,
+			 * we need to pass in ATOM_PPLL_INVALID
+			 */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			/* When programming DP DTO PLL ID should be invalid */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			/* Should not happen */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+		break;
+	}
+
+	return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+	.encoder_action_to_atom = encoder_action_to_atom,
+	.engine_bp_to_atom = engine_bp_to_atom,
+	.clock_source_id_to_atom = clock_source_id_to_atom,
+	.clock_source_id_to_atom_phy_clk_src_id =
+			clock_source_id_to_atom_phy_clk_src_id,
+	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+	.hpd_sel_to_atom = hpd_sel_to_atom,
+	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+	.phy_id_to_atom = phy_id_to_atom,
+	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+	.assign_control_parameter = NULL,
+	.clock_source_id_to_ref_clk_src = NULL,
+	.transmitter_bp_to_atom = NULL,
+	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Get the command table helper function table for DCE 11.0
+ *
+ * @return
+ * const struct command_table_helper * - pointer to the static function table
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
+{
+	return &command_table_helper_funcs;
+}

+ 34 - 0
drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */

+ 418 - 0
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c

@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+	uint8_t atom_dig_encoder_sel = 0;
+
+	switch (id) {
+	case ENGINE_ID_DIGA:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+		break;
+	case ENGINE_ID_DIGB:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+		break;
+	case ENGINE_ID_DIGC:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+		break;
+	case ENGINE_ID_DIGD:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+		break;
+	case ENGINE_ID_DIGE:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+		break;
+	case ENGINE_ID_DIGF:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+		break;
+	case ENGINE_ID_DIGG:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+		break;
+	case ENGINE_ID_UNKNOWN:
+		/* No DIG_FRONT is associated with DIG_BACKEND */
+		atom_dig_encoder_sel = 0;
+		break;
+	default:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+		break;
+	}
+
+	return atom_dig_encoder_sel;
+}
+
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_COMBO_PHY_PLL0:
+			*atom_pll_id = ATOM_COMBOPHY_PLL0;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL1:
+			*atom_pll_id = ATOM_COMBOPHY_PLL1;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL2:
+			*atom_pll_id = ATOM_COMBOPHY_PLL2;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL3:
+			*atom_pll_id = ATOM_COMBOPHY_PLL3;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL4:
+			*atom_pll_id = ATOM_COMBOPHY_PLL4;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL5:
+			*atom_pll_id = ATOM_COMBOPHY_PLL5;
+			break;
+		case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_GCK_DFS;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			*atom_pll_id = ATOM_DP_DTO;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			*atom_pll_id = ATOM_DP_DTO;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			/* Should not happen */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+		break;
+	}
+
+	return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+		enum bp_dce_clock_type id,
+		uint32_t *atom_clock_type)
+{
+	bool result = true;
+
+	if (atom_clock_type != NULL) {
+		switch (id) {
+		case DCECLOCK_TYPE_DISPLAY_CLOCK:
+			*atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+			break;
+
+		case DCECLOCK_TYPE_DPREFCLK:
+			*atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+			break;
+
+		default:
+			ASSERT_CRITICAL(false); /* Unhandled clock type! */
+			result = false;
+			break;
+		}
+	}
+
+	return result;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+	uint8_t atom_color_depth = 0;
+
+	switch (id) {
+	case TRANSMITTER_COLOR_DEPTH_24:
+		atom_color_depth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_30:
+		atom_color_depth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_36:
+		atom_color_depth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_48:
+		atom_color_depth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandled color depth! */
+		break;
+	}
+
+	return atom_color_depth;
+}
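
The PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_* values name the TMDS-to-pixel-clock ratio each deep color depth requires: 1:1 for 24 bpp, 5:4 for 30 bpp, 3:2 for 36 bpp, 2:1 for 48 bpp. A quick sanity check of the 36 bpp (12 bpc) case, with an illustrative 1080p60 pixel clock:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pixel_clock_khz = 148500;                 /* 1080p60 */
	uint32_t tmds_clock_khz = pixel_clock_khz * 3 / 2; /* 3:2 for 12 bpc */

	assert(tmds_clock_khz == 222750);
	return 0;
}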
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+	.encoder_action_to_atom = encoder_action_to_atom,
+	.engine_bp_to_atom = engine_bp_to_atom,
+	.clock_source_id_to_atom = clock_source_id_to_atom,
+	.clock_source_id_to_atom_phy_clk_src_id =
+			clock_source_id_to_atom_phy_clk_src_id,
+	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+	.hpd_sel_to_atom = hpd_sel_to_atom,
+	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+	.phy_id_to_atom = phy_id_to_atom,
+	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+	.assign_control_parameter = NULL,
+	.clock_source_id_to_ref_clk_src = NULL,
+	.transmitter_bp_to_atom = NULL,
+	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+	.dc_clock_type_to_atom = dc_clock_type_to_atom,
+	.transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce112_get_table
+ *
+ * @brief
+ * Get the command table helper function table for DCE 11.2
+ *
+ * @return
+ * const struct command_table_helper * - pointer to the static function table
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void)
+	return &command_table_helper_funcs;
+}

+ 34 - 0
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE112_H__ */

+ 354 - 0
drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c

@@ -0,0 +1,354 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/grph_object_id.h"
+#include "include/grph_object_defs.h"
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+		break;
+	}
+
+	return atom_action;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_ID_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_PLL1:
+			*atom_pll_id = ATOM_PPLL1;
+			break;
+		case CLOCK_SOURCE_ID_PLL2:
+			*atom_pll_id = ATOM_PPLL2;
+			break;
+		case CLOCK_SOURCE_ID_EXTERNAL:
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_EXT_PLL1;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			/* for VCE encoding,
+			 * we need to pass in ATOM_PPLL_INVALID
+			 */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			/* When programming DP DTO PLL ID should be invalid */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			BREAK_TO_DEBUGGER(); /* check when this will happen! */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+		break;
+	case SIGNAL_TYPE_LVDS:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+	uint8_t atom_dig_encoder_sel = 0;
+
+	switch (id) {
+	case ENGINE_ID_DIGA:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	case ENGINE_ID_DIGB:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+		break;
+	case ENGINE_ID_DIGC:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+		break;
+	case ENGINE_ID_DIGD:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+		break;
+	case ENGINE_ID_DIGE:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+		break;
+	case ENGINE_ID_DIGF:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+		break;
+	case ENGINE_ID_DIGG:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+		break;
+	default:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	}
+
+	return atom_dig_encoder_sel;
+}
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
+static const struct command_table_helper command_table_helper_funcs = {
+	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+	.encoder_action_to_atom = encoder_action_to_atom,
+	.engine_bp_to_atom = engine_bp_to_atom,
+	.clock_source_id_to_atom = clock_source_id_to_atom,
+	.clock_source_id_to_atom_phy_clk_src_id =
+		clock_source_id_to_atom_phy_clk_src_id,
+	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+	.hpd_sel_to_atom = hpd_sel_to_atom,
+	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+	.phy_id_to_atom = phy_id_to_atom,
+	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+	.assign_control_parameter =
+		dal_cmd_table_helper_assign_control_parameter,
+	.clock_source_id_to_ref_clk_src =
+		dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
+	.transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
+	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+	.encoder_mode_bp_to_atom =
+		dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
+{
+	return &command_table_helper_funcs;
+}

+ 33 - 0
drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+
+struct command_table_helper;
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void);
+
+#endif

+ 10 - 0
drivers/gpu/drm/amd/display/dc/calcs/Makefile

@@ -0,0 +1,10 @@
+#
+# Makefile for the 'calcs' sub-component of DAL.
+# It calculates bandwidth and watermark values for HW programming.
+#
+
+BW_CALCS = bandwidth_calcs.o bw_fixed.o gamma_calcs.o
+
+AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)

+ 3108 - 0
drivers/gpu/drm/amd/display/dc/calcs/bandwidth_calcs.c

@@ -0,0 +1,3108 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "bandwidth_calcs.h"
+#include "dc.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private Functions
+ ******************************************************************************/
+
+static void calculate_bandwidth(
+	const struct bw_calcs_dceip *dceip,
+	const struct bw_calcs_vbios *vbios,
+	struct bw_calcs_data *data)
+
+{
+	const int32_t pixels_per_chunk = 512;
+	const int32_t high = 2;
+	const int32_t mid = 1;
+	const int32_t low = 0;
+	const uint32_t s_low = 0;
+	const uint32_t s_mid1 = 1;
+	const uint32_t s_mid2 = 2;
+	const uint32_t s_mid3 = 3;
+	const uint32_t s_mid4 = 4;
+	const uint32_t s_mid5 = 5;
+	const uint32_t s_mid6 = 6;
+	const uint32_t s_high = 7;
+	const uint32_t bus_efficiency = 1;
+	const uint32_t dmif_chunk_buff_margin = 1;
+
+	uint32_t max_chunks_fbc_mode;
+	int32_t num_cursor_lines;
+
+	int32_t i, j, k;
+	struct bw_fixed yclk[3];
+	struct bw_fixed sclk[8];
+	bool d0_underlay_enable;
+	bool d1_underlay_enable;
+	bool fbc_enabled;
+	bool lpt_enabled;
+	enum bw_defines sclk_message;
+	enum bw_defines yclk_message;
+	enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
+	enum bw_defines tiling_mode[maximum_number_of_surfaces];
+	enum bw_defines surface_type[maximum_number_of_surfaces];
+	enum bw_defines voltage;
+	enum bw_defines pipe_check;
+	enum bw_defines hsr_check;
+	enum bw_defines vsr_check;
+	enum bw_defines lb_size_check;
+	enum bw_defines fbc_check;
+	enum bw_defines rotation_check;
+	enum bw_defines mode_check;
+	enum bw_defines nbp_state_change_enable_blank;
+	/*initialize variables*/
+	int32_t number_of_displays_enabled = 0;
+	int32_t number_of_displays_enabled_with_margin = 0;
+	int32_t number_of_aligned_displays_with_no_margin = 0;
+
+	yclk[low] = vbios->low_yclk;
+	yclk[mid] = vbios->mid_yclk;
+	yclk[high] = vbios->high_yclk;
+	sclk[s_low] = vbios->low_sclk;
+	sclk[s_mid1] = vbios->mid1_sclk;
+	sclk[s_mid2] = vbios->mid2_sclk;
+	sclk[s_mid3] = vbios->mid3_sclk;
+	sclk[s_mid4] = vbios->mid4_sclk;
+	sclk[s_mid5] = vbios->mid5_sclk;
+	sclk[s_mid6] = vbios->mid6_sclk;
+	sclk[s_high] = vbios->high_sclk;
+	/*''''''''''''''''''*/
+	/* surface assignment:*/
+	/* 0: d0 underlay or underlay luma*/
+	/* 1: d0 underlay chroma*/
+	/* 2: d1 underlay or underlay luma*/
+	/* 3: d1 underlay chroma*/
+	/* 4: d0 graphics*/
+	/* 5: d1 graphics*/
+	/* 6: d2 graphics*/
+	/* 7: d3 graphics, same mode as d2*/
+	/* 8: d4 graphics, same mode as d2*/
+	/* 9: d5 graphics, same mode as d2*/
+	/* ...*/
+	/* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
+	/* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
+	/* underlay luma and chroma surface parameters from spreadsheet*/
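+	/* worked example (illustrative): with two displays, d0 underlay blended
+	 * with graphics (bw_def_blend) and a 420 underlay surface type,
+	 * surfaces 0 and 1 (d0 underlay luma/chroma) and 4 and 5 (d0/d1
+	 * graphics) end up enabled, surface 4 with alpha; indices 2 and 3
+	 * (d1 underlay) stay disabled.
+	 */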
+
+	if (data->d0_underlay_mode == bw_def_none) {
+		d0_underlay_enable = 0;
+	}
+	else {
+		d0_underlay_enable = 1;
+	}
+	if (data->d1_underlay_mode == bw_def_none) {
+		d1_underlay_enable = 0;
+	}
+	else {
+		d1_underlay_enable = 1;
+	}
+	data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
+	switch (data->underlay_surface_type) {
+	case bw_def_420:
+		surface_type[0] = bw_def_underlay420_luma;
+		surface_type[2] = bw_def_underlay420_luma;
+		data->bytes_per_pixel[0] = 1;
+		data->bytes_per_pixel[2] = 1;
+		surface_type[1] = bw_def_underlay420_chroma;
+		surface_type[3] = bw_def_underlay420_chroma;
+		data->bytes_per_pixel[1] = 2;
+		data->bytes_per_pixel[3] = 2;
+		data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
+		data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
+		data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
+		data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
+		break;
+	case bw_def_422:
+		surface_type[0] = bw_def_underlay422;
+		surface_type[2] = bw_def_underlay422;
+		data->bytes_per_pixel[0] = 2;
+		data->bytes_per_pixel[2] = 2;
+		data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
+		data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
+		break;
+	default:
+		surface_type[0] = bw_def_underlay444;
+		surface_type[2] = bw_def_underlay444;
+		data->bytes_per_pixel[0] = 4;
+		data->bytes_per_pixel[2] = 4;
+		data->lb_size_per_component[0] = dceip->lb_size_per_component444;
+		data->lb_size_per_component[2] = dceip->lb_size_per_component444;
+		break;
+	}
+	if (d0_underlay_enable) {
+		switch (data->underlay_surface_type) {
+		case bw_def_420:
+			data->enable[0] = 1;
+			data->enable[1] = 1;
+			break;
+		default:
+			data->enable[0] = 1;
+			data->enable[1] = 0;
+			break;
+		}
+	}
+	else {
+		data->enable[0] = 0;
+		data->enable[1] = 0;
+	}
+	if (d1_underlay_enable) {
+		switch (data->underlay_surface_type) {
+		case bw_def_420:
+			data->enable[2] = 1;
+			data->enable[3] = 1;
+			break;
+		default:
+			data->enable[2] = 1;
+			data->enable[3] = 0;
+			break;
+		}
+	}
+	else {
+		data->enable[2] = 0;
+		data->enable[3] = 0;
+	}
+	data->use_alpha[0] = 0;
+	data->use_alpha[1] = 0;
+	data->use_alpha[2] = 0;
+	data->use_alpha[3] = 0;
+	data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
+	data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
+	data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
+	data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
+	/*underlay0 same as graphics display pipe0*/
+	data->interlace_mode[0] = data->interlace_mode[4];
+	data->interlace_mode[1] = data->interlace_mode[4];
+	/*underlay1 same as graphics display pipe1*/
+	data->interlace_mode[2] = data->interlace_mode[5];
+	data->interlace_mode[3] = data->interlace_mode[5];
+	/*underlay0 same as graphics display pipe0*/
+	data->h_total[0] = data->h_total[4];
+	data->v_total[0] = data->v_total[4];
+	data->h_total[1] = data->h_total[4];
+	data->v_total[1] = data->v_total[4];
+	/*underlay1 same as graphics display pipe1*/
+	data->h_total[2] = data->h_total[5];
+	data->v_total[2] = data->v_total[5];
+	data->h_total[3] = data->h_total[5];
+	data->v_total[3] = data->v_total[5];
+	/*underlay0 same as graphics display pipe0*/
+	data->pixel_rate[0] = data->pixel_rate[4];
+	data->pixel_rate[1] = data->pixel_rate[4];
+	/*underlay1 same as graphics display pipe1*/
+	data->pixel_rate[2] = data->pixel_rate[5];
+	data->pixel_rate[3] = data->pixel_rate[5];
+	if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
+		tiling_mode[0] = bw_def_linear;
+		tiling_mode[1] = bw_def_linear;
+		tiling_mode[2] = bw_def_linear;
+		tiling_mode[3] = bw_def_linear;
+	}
+	else {
+		tiling_mode[0] = bw_def_landscape;
+		tiling_mode[1] = bw_def_landscape;
+		tiling_mode[2] = bw_def_landscape;
+		tiling_mode[3] = bw_def_landscape;
+	}
+	data->lb_bpc[0] = data->underlay_lb_bpc;
+	data->lb_bpc[1] = data->underlay_lb_bpc;
+	data->lb_bpc[2] = data->underlay_lb_bpc;
+	data->lb_bpc[3] = data->underlay_lb_bpc;
+	data->compression_rate[0] = bw_int_to_fixed(1);
+	data->compression_rate[1] = bw_int_to_fixed(1);
+	data->compression_rate[2] = bw_int_to_fixed(1);
+	data->compression_rate[3] = bw_int_to_fixed(1);
+	data->access_one_channel_only[0] = 0;
+	data->access_one_channel_only[1] = 0;
+	data->access_one_channel_only[2] = 0;
+	data->access_one_channel_only[3] = 0;
+	data->cursor_width_pixels[0] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[1] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[2] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[3] = bw_int_to_fixed(0);
+	/* graphics surface parameters from spreadsheet*/
+	fbc_enabled = 0;
+	lpt_enabled = 0;
+	for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
+		if (i < data->number_of_displays + 4) {
+			if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
+				data->enable[i] = 0;
+				data->use_alpha[i] = 0;
+			}
+			else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 1;
+			}
+			else if (i == 4) {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 0;
+			}
+			else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
+				data->enable[i] = 0;
+				data->use_alpha[i] = 0;
+			}
+			else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 1;
+			}
+			else {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 0;
+			}
+		}
+		else {
+			data->enable[i] = 0;
+			data->use_alpha[i] = 0;
+		}
+		data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
+		surface_type[i] = bw_def_graphics;
+		data->lb_size_per_component[i] = dceip->lb_size_per_component444;
+		if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
+			tiling_mode[i] = bw_def_linear;
+		}
+		else {
+			tiling_mode[i] = bw_def_tiled;
+		}
+		data->lb_bpc[i] = data->graphics_lb_bpc;
+		if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
+			data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
+			data->access_one_channel_only[i] = data->lpt_en[i];
+		}
+		else {
+			data->compression_rate[i] = bw_int_to_fixed(1);
+			data->access_one_channel_only[i] = 0;
+		}
+		if (data->fbc_en[i] == 1) {
+			fbc_enabled = 1;
+			if (data->lpt_en[i] == 1) {
+				lpt_enabled = 1;
+			}
+		}
+		data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
+	}
+	/* display_write_back420*/
+	data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
+	data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->enable[maximum_number_of_surfaces - 2] = 1;
+		data->enable[maximum_number_of_surfaces - 1] = 1;
+	}
+	else {
+		data->enable[maximum_number_of_surfaces - 2] = 0;
+		data->enable[maximum_number_of_surfaces - 1] = 0;
+	}
+	surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
+	surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
+	data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
+	data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
+	data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
+	data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
+	data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
+	data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
+	data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+	data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+	tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
+	tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
+	data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
+	data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
+	data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
+	data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
+	/*assume display pipe1 has dwb enabled*/
+	data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
+	data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
+	data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
+	data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
+	data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
+	data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
+	data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
+	data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
+	data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
+	data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
+	data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
+	data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
+	data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
+	data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
+	data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+	data->use_alpha[maximum_number_of_surfaces - 2] = 0;
+	data->use_alpha[maximum_number_of_surfaces - 1] = 0;
+	/*mode check calculations:*/
+	/* mode within dce ip capabilities*/
+	/* fbc*/
+	/* hsr*/
+	/* vsr*/
+	/* lb size*/
+	/*effective scaling source and ratios:*/
+	/*for graphics, non-stereo, non-interlace surfaces, when the source and destination sizes are the same, only one tap is used*/
+	/*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/
+	/*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
+	/*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
+	/*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
+	/*in interlace mode there is 2:1 vertical downscaling for each field*/
+	/*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
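+	/* worked example (illustrative): a 1920x1080 graphics surface rotated
+	 * 90 degrees with hsr = 1 and vsr = 2 becomes 1080x1920 with hsr = 2
+	 * and vsr = 1 after rotation; in interlace mode the final vsr doubles
+	 * back to 2, and with no panning the width rounds up to chunks as
+	 * ceil(1080 / 128) * 128 = 1152 pixels.
+	 */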
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
+				data->h_taps[i] = bw_int_to_fixed(1);
+				data->v_taps[i] = bw_int_to_fixed(1);
+			}
+			if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
+				data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
+				data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
+				data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
+				data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
+				data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
+			}
+			else {
+				data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
+				data->src_width_after_surface_type = data->src_width[i];
+				data->src_height_after_surface_type = data->src_height[i];
+				data->hsr_after_surface_type = data->h_scale_ratio[i];
+				data->vsr_after_surface_type = data->v_scale_ratio[i];
+			}
+			if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->src_width_after_rotation = data->src_height_after_surface_type;
+				data->src_height_after_rotation = data->src_width_after_surface_type;
+				data->hsr_after_rotation = data->vsr_after_surface_type;
+				data->vsr_after_rotation = data->hsr_after_surface_type;
+			}
+			else {
+				data->src_width_after_rotation = data->src_width_after_surface_type;
+				data->src_height_after_rotation = data->src_height_after_surface_type;
+				data->hsr_after_rotation = data->hsr_after_surface_type;
+				data->vsr_after_rotation = data->vsr_after_surface_type;
+			}
+			switch (data->stereo_mode[i]) {
+			case bw_def_top_bottom:
+				data->source_width_pixels[i] = data->src_width_after_rotation;
+				data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
+				data->hsr_after_stereo = data->hsr_after_rotation;
+				data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
+				break;
+			case bw_def_side_by_side:
+				data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
+				data->source_height_pixels = data->src_height_after_rotation;
+				data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
+				data->vsr_after_stereo = data->vsr_after_rotation;
+				break;
+			default:
+				data->source_width_pixels[i] = data->src_width_after_rotation;
+				data->source_height_pixels = data->src_height_after_rotation;
+				data->hsr_after_stereo = data->hsr_after_rotation;
+				data->vsr_after_stereo = data->vsr_after_rotation;
+				break;
+			}
+			data->hsr[i] = data->hsr_after_stereo;
+			if (data->interlace_mode[i]) {
+				data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
+			}
+			else {
+				data->vsr[i] = data->vsr_after_stereo;
+			}
+			if (data->panning_and_bezel_adjustment != bw_def_none) {
+				data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
+			}
+			else {
+				data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
+			}
+			data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
+		}
+	}
+	/*mode support checks:*/
+	/*the number of graphics and underlay pipes is limited by the ip support*/
+	/*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
+	/*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
+	/*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
+	/*the number of lines in the line buffer has to exceed the number of vertical taps*/
+	/*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
+	/*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+	/*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 24.011719 / 3, rounded up to a multiple of 48*/
+	/*frame buffer compression is not supported with stereo mode, rotation, or non- 888 formats*/
+	/*rotation is not supported with linear or stereo modes*/
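+	/* worked example (illustrative) for the 8 bit per component line
+	 * buffer case below: with a 1920 pixel line buffer source width the
+	 * pitch is ceil_48(ceil_8(1920) * 24.011719 / 3)
+	 * = ceil_48(1920 * 8.00390625) = ceil_48(15367.5) = 15408.
+	 */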
+	if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
+		pipe_check = bw_def_ok;
+	}
+	else {
+		pipe_check = bw_def_notok;
+	}
+	hsr_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
+				if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
+					hsr_check = bw_def_hsr_mtn_4;
+				}
+				else {
+					if (bw_mtn(data->hsr[i], data->h_taps[i])) {
+						hsr_check = bw_def_hsr_mtn_h_taps;
+					}
+					else {
+						if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
+							hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
+						}
+					}
+				}
+			}
+		}
+	}
+	vsr_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
+				if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
+					vsr_check = bw_def_vsr_mtn_4;
+				}
+				else {
+					if (bw_mtn(data->vsr[i], data->v_taps[i])) {
+						vsr_check = bw_def_vsr_mtn_v_taps;
+					}
+				}
+			}
+		}
+	}
+	lb_size_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
+				data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
+			}
+			else {
+				data->source_width_in_lb = data->source_width_pixels[i];
+			}
+			switch (data->lb_bpc[i]) {
+			case 8:
+				data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+				break;
+			case 10:
+				data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+				break;
+			default:
+				data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
+				break;
+			}
+			data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
+			/*clamp the partitions to the maximum number supported by the lb*/
+			if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+				data->lb_partitions_max[i] = bw_int_to_fixed(10);
+			}
+			else {
+				data->lb_partitions_max[i] = bw_int_to_fixed(7);
+			}
+			data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
+			if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
+				lb_size_check = bw_def_notok;
+			}
+		}
+	}
+	fbc_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
+			fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
+		}
+	}
+	rotation_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
+				rotation_check = bw_def_invalid_linear_or_stereo_mode;
+			}
+		}
+	}
+	if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
+		mode_check = bw_def_ok;
+	}
+	else {
+		mode_check = bw_def_notok;
+	}
+	/*number of memory channels for write-back client*/
+	data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
+	data->number_of_dram_channels = vbios->number_of_dram_channels;
+	/*modify number of memory channels if lpt mode is enabled*/
+	/* low power tiling mode register*/
+	/* 0 = use channel 0*/
+	/* 1 = use channel 0 and 1*/
+	/* 2 = use channel 0,1,2,3*/
+	if ((fbc_enabled == 1 && lpt_enabled == 1)) {
+		data->dram_efficiency = bw_int_to_fixed(1);
+		if (dceip->low_power_tiling_mode == 0) {
+			data->number_of_dram_channels = 1;
+		}
+		else if (dceip->low_power_tiling_mode == 1) {
+			data->number_of_dram_channels = 2;
+		}
+		else if (dceip->low_power_tiling_mode == 2) {
+			data->number_of_dram_channels = 4;
+		}
+		else {
+			data->number_of_dram_channels = 1;
+		}
+	}
+	else {
+		data->dram_efficiency = bw_frc_to_fixed(8, 10);
+	}
+	/*memory request size and latency hiding:*/
+	/*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
+	/*the display write-back requests are single line*/
+	/*for tiled graphics surfaces, or underlay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half of the bytes in the request size are useful in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
+	/*for underlay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
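+	/* worked example (illustrative): a tiled 32 bpp graphics surface
+	 * rotated orthogonally to the tiling grain gets 32 byte requests of
+	 * which only 16 bytes are useful, while the same surface in landscape
+	 * keeps the full 64 byte, 2-line interleaved requests.
+	 */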
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
+				if ((i < 4)) {
+					/*underlay portrait tiling mode is not supported*/
+					data->orthogonal_rotation[i] = 1;
+				}
+				else {
+					/*graphics portrait tiling mode*/
+					if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
+						data->orthogonal_rotation[i] = 0;
+					}
+					else {
+						data->orthogonal_rotation[i] = 1;
+					}
+				}
+			}
+			else {
+				if ((i < 4)) {
+					/*only landscape tiling mode is supported for underlay*/
+					if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
+						data->orthogonal_rotation[i] = 0;
+					}
+					else {
+						data->orthogonal_rotation[i] = 1;
+					}
+				}
+				else {
+					/*graphics landscape tiling mode*/
+					if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
+						data->orthogonal_rotation[i] = 0;
+					}
+					else {
+						data->orthogonal_rotation[i] = 1;
+					}
+				}
+			}
+			if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
+				data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
+			}
+			else {
+				data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
+			}
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->bytes_per_request[i] = bw_int_to_fixed(64);
+				data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+				data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
+				data->latency_hiding_lines[i] = bw_int_to_fixed(1);
+			}
+			else if (tiling_mode[i] == bw_def_linear) {
+				data->bytes_per_request[i] = bw_int_to_fixed(64);
+				data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+				data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+				data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+			}
+			else {
+				if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
+					switch (data->bytes_per_pixel[i]) {
+					case 8:
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+						if (data->orthogonal_rotation[i]) {
+							data->bytes_per_request[i] = bw_int_to_fixed(32);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+						}
+						else {
+							data->bytes_per_request[i] = bw_int_to_fixed(64);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+						}
+						break;
+					case 4:
+						if (data->orthogonal_rotation[i]) {
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+							data->bytes_per_request[i] = bw_int_to_fixed(32);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+						}
+						else {
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+							data->bytes_per_request[i] = bw_int_to_fixed(64);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+						}
+						break;
+					case 2:
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+						data->bytes_per_request[i] = bw_int_to_fixed(32);
+						data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+						break;
+					default:
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+						data->bytes_per_request[i] = bw_int_to_fixed(32);
+						data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+						break;
+					}
+				}
+				else {
+					data->bytes_per_request[i] = bw_int_to_fixed(64);
+					data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+					if (data->orthogonal_rotation[i]) {
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+					}
+					else {
+						switch (data->bytes_per_pixel[i]) {
+						case 4:
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+							break;
+						case 2:
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+							break;
+						default:
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+							break;
+						}
+					}
+				}
+			}
+		}
+	}
+	/*requested peak bandwidth:*/
+	/*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
+	/*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
+	/*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
+	/**/
+	/*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
+	/*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
+	/**/
+	/*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
+	/*rounded up to even and divided by the line times for initialization, which is normally three.*/
+	/*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
+	/*rounded up to line pairs if not doing line buffer prefetching.*/
+	/**/
+	/*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
+	/*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
+	/**/
+	/*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
+	/*vertical scale ratio and the number of vertical taps increased by one.  add one more for possible odd line*/
+	/*panning/bezel adjustment mode.*/
+	/**/
+	/*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
+	/*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
+	/*furthermore, there is only one line time for initialization.*/
+	/**/
+	/*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
+	/*the ceiling of the vertical scale ratio.*/
+	/**/
+	/*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.*/
+	/**/
+	/*the horizontal blank and chunk granularity factor is used to indirectly indicate the interval of time required to transfer the source pixels.*/
+	/*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
+	/*it applies when the lines in per line out is not 2 or 4.  it does not apply when there is a line buffer between the scl and blnd.*/
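+	/* worked example (illustrative): with line buffer prefetching off, a
+	 * vertical scale ratio of 1.2 rounds up to 4/3 lines in per line out
+	 * (a 0,2,2 pattern), 1.4 rounds up to 6/4 (a 0,2,2,2 pattern), and 1.7
+	 * rounds up to 2 line pairs, per the cases below.
+	 */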
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
+			if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
+				data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
+			}
+			if (data->stereo_mode[i] == bw_def_top_bottom) {
+				v_filter_init_mode[i] = bw_def_manual;
+				data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
+			}
+			else {
+				v_filter_init_mode[i] = bw_def_auto;
+			}
+			if (data->stereo_mode[i] == bw_def_top_bottom) {
+				data->num_lines_at_frame_start = bw_int_to_fixed(1);
+			}
+			else {
+				data->num_lines_at_frame_start = bw_int_to_fixed(3);
+			}
+			if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
+				data->line_buffer_prefetch[i] = 0;
+			}
+			else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
+				data->line_buffer_prefetch[i] = 1;
+			}
+			else {
+				data->line_buffer_prefetch[i] = 0;
+			}
+			data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
+			if (data->line_buffer_prefetch[i] == 1) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
+			}
+			else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
+			}
+			else if (bw_leq(data->vsr[i], bw_frc_to_fixed(4, 3))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
+			}
+			else if (bw_leq(data->vsr[i], bw_frc_to_fixed(6, 4))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
+			}
+			else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
+			}
+			else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
+			}
+			else {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
+			}
+			if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
+				data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
+			}
+			else {
+				data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
+			}
+			data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
+			data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
+		}
+	}
+	/*outstanding chunk request limit*/
+	/*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
+	/*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
+	/**/
+	/*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
+	/**/
+	/*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
+	/*and underlay.*/
+	/**/
+	/*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
+	/*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
+	/*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
+	/*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
+	/*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
+	/*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
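+	/* worked example (illustrative): a 4 bpp graphics surface with 2-line
+	 * interleaving has a memory chunk size of 256 * 2 * 4 = 2048 bytes
+	 * (using the 256 pixel chunk width noted above); with a single display
+	 * and no underlay the chunk request limit is simply pinned at the
+	 * maximum of 127 further below.
+	 */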
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
+				data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
+			}
+			else {
+				data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
+			}
+		}
+		if (data->fbc_en[i] == 1) {
+			max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			switch (surface_type[i]) {
+			case bw_def_display_write_back420_luma:
+				data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
+				break;
+			case bw_def_display_write_back420_chroma:
+				data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
+				break;
+			case bw_def_underlay420_luma:
+				data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+				break;
+			case bw_def_underlay420_chroma:
+				data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
+				break;
+			case bw_def_underlay422:
+			case bw_def_underlay444:
+				if (data->orthogonal_rotation[i] == 0) {
+					data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+				}
+				else {
+					data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
+				}
+				break;
+			default:
+				if (data->fbc_en[i] == 1) {
+					/*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
+					if (data->number_of_displays == 1) {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+					}
+					else {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+					}
+				}
+				else {
+					/*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
+					if (data->number_of_displays == 1) {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+					}
+					else {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+					}
+				}
+				break;
+			}
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+				data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+			}
+			else {
+				data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
+				data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+		}
+	}
+	data->min_dmif_size_in_time = bw_int_to_fixed(9999);
+	data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
+					data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+				}
+			}
+			else {
+				if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
+					data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+				}
+			}
+		}
+	}
+	data->total_requests_for_dmif_size = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+			data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
+				data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
+			}
+			else {
+				data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
+				/*set maximum chunk limit if only one graphic pipe is enabled*/
+				data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
+			}
+			else {
+				data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
+				/*clamp maximum chunk limit in the graphic display pipe*/
+				if ((i >= 4)) {
+					data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
+				}
+			}
+		}
+	}
+	/*outstanding pte request limit*/
+	/*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
+	/*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
+	/*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
+	/*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
+	/*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
+	/*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
+	/*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
+	/*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/
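+	/* worked example (illustrative): a 32 bpp tiled surface with no
+	 * rotation uses 32x32 scatter-gather pages and 8 useful ptes per
+	 * request, so the pte requests per chunk come out to
+	 * 256 / 32 / 8 = 1 request per chunk.
+	 */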
+	if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
+		data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+	}
+	else {
+		data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+			if (tiling_mode[i] == bw_def_linear) {
+				data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+				data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
+				data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
+				data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
+				data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+			}
+			else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
+				data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+				switch (data->bytes_per_pixel[i]) {
+				case 4:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+					break;
+				case 2:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+					break;
+				default:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+					break;
+				}
+				data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+				data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+			}
+			else {
+				data->useful_pte_per_pte_request = bw_int_to_fixed(1);
+				switch (data->bytes_per_pixel[i]) {
+				case 4:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+					break;
+				case 2:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+					break;
+				default:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+					break;
+				}
+				data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+				data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+			}
+			data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
+			data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
+			data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
+			if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
+				data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
+			}
+			else {
+				data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
+			}
+		}
+	}
+	/*pitch padding recommended for efficiency in linear mode*/
+	/*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
+	/*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
+	data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
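+	/* worked example (illustrative): with, say, 8 dram banks and 2
+	 * channels the inefficient pitch granularity is 256 * 8 * 2 = 4096
+	 * bytes, i.e. a 1024 pixel pitch at 32 bpp; padding such a pitch by
+	 * 256 pixels avoids the channel-bank aliasing described above.
+	 */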
+
+	/*pixel transfer time*/
+	/*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/
+	/*for dmif, pte and cursor requests have to be included.*/
+	/*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
+	/*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
+	/*the page close-open time is determined by trc and the number of page close-opens*/
+	/*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
+	/*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
+	/*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/
+	/*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
+	/*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
+	/*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
+	/*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
+	/*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
+	/*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
+	/*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the first pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
+	/*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/
+	/*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
+	/*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
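+	/* worked example (illustrative): on gddr5 with a 64 bit channel the
+	 * dram overhead factor applied further below is
+	 * ceil((burst 8 * channel bytes) / bytes_per_request), so a 32 byte
+	 * request doubles the dram access data while a 64 byte request adds
+	 * none; hbm pseudo-channels avoid this penalty entirely.
+	 */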
+	data->cursor_total_data = bw_int_to_fixed(0);
+	data->cursor_total_request_groups = bw_int_to_fixed(0);
+	data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
+	data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
+			if (dceip->large_cursor == 1) {
+				data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
+			}
+			else {
+				data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
+			}
+			if (data->scatter_gather_enable_for_pipe[i]) {
+				data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
+				data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
+			}
+		}
+	}
+	data->tile_width_in_pixels = bw_int_to_fixed(8);
+	data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+	data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
+				data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
+			}
+			else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
+				data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
+			}
+			else {
+				data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
+			}
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+			}
+			else {
+				data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+			}
+		}
+	}
+	data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
+	data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+		}
+	}
+	data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
+			}
+		}
+	}
+	data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+	data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
+	data->total_display_reads_required_data = bw_int_to_fixed(0);
+	data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
+	data->total_display_writes_required_data = bw_int_to_fixed(0);
+	data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
+				/*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width.  the*/
+				/*pseudo-channels may be read independently of one another.*/
+				/*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
+				/*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
+				/*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
+				/*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
+				/*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
+				/*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
+				/*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
+				/*the memory efficiency will be 50% for the 32 byte sized data.*/
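+				/*illustrative example (channel width assumed): with a 64-bit gddr5 channel a burst reads 8 * 64 / 8 = 64 bytes,*/
+				/*so a 32 byte request below is scaled by ceil(64 / 32) = 2, i.e. the 50% efficiency noted above.*/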
+				if (vbios->memory_type == bw_def_hbm) {
+					data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
+				}
+				else {
+					data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
+				}
+				data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
+				data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
+			}
+			else {
+				data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
+				data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
+			}
+		}
+	}
+	data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+	data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
+				data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
+			}
+			else {
+				if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
+					data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
+				}
+				else {
+					data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
+				}
+			}
+			data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+			data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
+			data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+			data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
+		}
+	}
+	for (i = 0; i <= 2; i++) {
+		for (j = 0; j <= 7; j++) {
+			data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+			if (data->d1_display_write_back_dwb_enable == 1) {
+				data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		for (j = 0; j <= 2; j++) {
+			for (k = 0; k <= 7; k++) {
+				if (data->enable[i]) {
+					if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+						/*time to transfer data from the dmif buffer to the lb.  since the mc to dmif transfer time overlaps*/
+						/*with the dmif to lb transfer time, only time to transfer the last chunk  is considered.*/
+						data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
+						data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
+						/*during an mclk switch the requests from the dce ip are stored in the gmc/arb.  these requests should be serviced immediately*/
+						/*after the mclk switch sequence and not incur an urgent latency penalty.  it is assumed that the gmc/arb can hold up to 256 requests*/
+						/*per memory channel.  if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+						/*immediately serviced without a gap in the urgent requests.*/
+						/*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+						if (surface_type[i] == bw_def_graphics) {
+							switch (data->lb_bpc[i]) {
+							case 6:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+								break;
+							case 8:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+								break;
+							case 10:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+								break;
+							default:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+								break;
+							}
+							if (data->use_alpha[i] == 1) {
+								data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+							}
+						}
+						else {
+							switch (data->lb_bpc[i]) {
+							case 6:
+								data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+								break;
+							case 8:
+								data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+								break;
+							case 10:
+								data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+								break;
+							default:
+								data->v_scaler_efficiency = bw_int_to_fixed(3);
+								break;
+							}
+						}
+						if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+							data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+						}
+						else {
+							data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+						}
+						data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
+					}
+					else {
+						data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
+						/*during an mclk switch the requests from the dce ip are stored in the gmc/arb.  these requests should be serviced immediately*/
+						/*after the mclk switch sequence and not incur an urgent latency penalty.  it is assumed that the gmc/arb can hold up to 256 requests*/
+						/*per memory channel.  if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+						/*immediately serviced without a gap in the urgent requests.*/
+						/*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+						data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
+					}
+				}
+			}
+		}
+	}
+	/*cpu c-state and p-state change enable*/
+	/*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
+	/*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
+	/*condition for the blackout duration:*/
+	/* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
+	/*condition for the blackout recovery:*/
+	/* recovery time >  dmif burst time + 2 * urgent latency*/
+	/* recovery time > (display bw * blackout duration  + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
+	/*                  / (dispclk - display bw)*/
+	/*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
+	/*the minimum latency hiding is  further limited by the cursor.  the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
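+	/*illustrative example (buffer depth assumed): with a 4 line cursor dcp buffer and downscaling less than two, the cursor latency hiding below is (4 - 1) * h_total / (vsr * pixel_rate)*/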
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
+				if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
+					data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+				}
+				else {
+					data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+				}
+			}
+			else {
+				data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
+				data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+			}
+			else {
+				data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+			}
+			data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
+		}
+	}
+	for (i = 0; i <= 2; i++) {
+		for (j = 0; j <= 7; j++) {
+			data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
+			data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
+			data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
+			for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+				if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
+					if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+						data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+						data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
+						if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+						}
+						else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+						}
+					}
+					else {
+						data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+						data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+						if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+						}
+						else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+						}
+					}
+				}
+			}
+		}
+	}
+	if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
+		data->cpup_state_change_enable = bw_def_yes;
+		if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
+			data->cpuc_state_change_enable = bw_def_yes;
+		}
+		else {
+			data->cpuc_state_change_enable = bw_def_no;
+		}
+	}
+	else {
+		data->cpup_state_change_enable = bw_def_no;
+		data->cpuc_state_change_enable = bw_def_no;
+	}
+	/*nb p-state change enable*/
+	/*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
+	/*below the maximum.*/
+	/*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
+	/*minus the dmif burst time, minus the source line transfer time*/
+	/*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency (approximated in the code below as 0.8 times the total dmifmc urgent latency)*/
+	/*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
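+	/*as computed below: dram speed change margin = maximum latency hiding with cursor - nbp state change latency - dmif burst time - dram speed change line source transfer time (writeback pipes also subtract the mcifwr burst time)*/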
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+			}
+			else {
+				/*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
+				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+			}
+			data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
+		}
+	}
+	/*initialize variables*/
+	number_of_displays_enabled = 0;
+	number_of_displays_enabled_with_margin = 0;
+	for (k = 0; k < maximum_number_of_surfaces; k++) {
+		if (data->enable[k]) {
+			number_of_displays_enabled = number_of_displays_enabled + 1;
+		}
+		/*reset the per-surface p-state change enable flag before the margin scan below*/
+		data->display_pstate_change_enable[k] = 0;
+	}
+	for (i = 0; i <= 2; i++) {
+		for (j = 0; j <= 7; j++) {
+			data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
+			data->dram_speed_change_margin = bw_int_to_fixed(9999);
+			data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
+			data->num_displays_with_margin[i][j] = 0;
+			for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+				if (data->enable[k]) {
+					if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+						data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+						if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+							/*determine the minimum dram clock change margin for each set of clock frequencies*/
+							data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+							/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+							data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+							if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+								data->display_pstate_change_enable[k] = 1;
+								data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+							}
+						}
+					}
+					else {
+						data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+						if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+							/*determine the minimum dram clock change margin for each display pipe*/
+							data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+							/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+							data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+							if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+								data->display_pstate_change_enable[k] = 1;
+								data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	/*determine the number of displays with margin to switch in the v_active region*/
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
+			number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
+		}
+	}
+	/*determine the number of displays that don't have any dram clock change margin, but*/
+	/*have the same resolution.  these displays can switch in a common vblank region if*/
+	/*their frames are aligned.*/
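+	/*illustrative example (configuration assumed): two enabled displays with no v_active margin but positive v_blank margin and identical width, height, vsr, hsr and pixel rate each count themselves and the other, so displays_with_same_mode is 2 for both*/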
+	data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if (data->enable[k]) {
+			if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+				data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+				data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+			}
+			else {
+				data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+				data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		data->displays_with_same_mode[i] = bw_int_to_fixed(0);
+		if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
+			for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
+				if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+					data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
+				}
+			}
+		}
+	}
+	/*compute the maximum number of aligned displays with no margin*/
+	number_of_aligned_displays_with_no_margin = 0;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
+	}
+	/*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
+	/*aligned displays with the same timing.*/
+	/*the display(s) with the negative margin can be switched in the v_blank region while the other*/
+	/*displays are in v_blank or v_active.*/
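+	/*condition checked below: displays enabled with margin + aligned displays with no margin == displays enabled, and the high yclk/sclk margin is positive, finite and achievable within the high voltage maximum dispclk*/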
+	if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+		data->nbp_state_change_enable = bw_def_yes;
+	}
+	else {
+		data->nbp_state_change_enable = bw_def_no;
+	}
+	/*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
+	if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
+		nbp_state_change_enable_blank = bw_def_yes;
+	}
+	else {
+		nbp_state_change_enable_blank = bw_def_no;
+	}
+	/*required yclk(pclk)*/
+	/*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
+	/*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
+	/*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
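+	/*dram bandwidth at a given yclk level, as used below: dram_efficiency * yclk * (dram_channel_width_in_bits / 8) * number_of_dram_channels*/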
+	data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
+	/* number of cursor lines stored in the cursor data return buffer*/
+	num_cursor_lines = 0;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
+				/*compute number of cursor lines stored in data return buffer*/
+				if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
+					num_cursor_lines = 4;
+				}
+				else {
+					num_cursor_lines = 2;
+				}
+				data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
+			}
+		}
+	}
+	/*compute minimum time to read one chunk from the dmif buffer*/
+	if ((number_of_displays_enabled > 2)) {
+		data->chunk_request_delay = 0;
+	}
+	else {
+		data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
+	}
+	data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
+	data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
+	data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
+	data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
+	data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
+	data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
+	data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
+	if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+		data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+		yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+		data->y_clk_level = high;
+		data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+	}
+	else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+		data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+		yclk_message = bw_def_exceeded_allowed_page_close_open;
+		data->y_clk_level = high;
+		data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+	}
+	else {
+		data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
+		if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+			yclk_message = bw_fixed_to_int(vbios->low_yclk);
+			data->y_clk_level = low;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+		else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+			yclk_message = bw_fixed_to_int(vbios->mid_yclk);
+			data->y_clk_level = mid;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+		else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+			yclk_message = bw_fixed_to_int(vbios->high_yclk);
+			data->y_clk_level = high;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+		else {
+			yclk_message = bw_def_exceeded_allowed_maximum_bw;
+			data->y_clk_level = high;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+	}
+	/*required sclk*/
+	/*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queue size*/
+	/*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
+	/*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
+	/*for dmif, pte and cursor requests have to be included.*/
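+	/*as computed below: required sclk = (required data / time for data transfer) / (data_return_bus_width * bus_efficiency)*/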
+	data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+	data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+	if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+		data->required_sclk = bw_int_to_fixed(9999);
+		sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+		data->sclk_level = s_high;
+	}
+	else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+		data->required_sclk = bw_int_to_fixed(9999);
+		sclk_message = bw_def_exceeded_allowed_page_close_open;
+		data->sclk_level = s_high;
+	}
+	else {
+		data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
+		if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_low;
+			data->sclk_level = s_low;
+			data->required_sclk = vbios->low_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid1;
+			data->required_sclk = vbios->mid1_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid2;
+			data->required_sclk = vbios->mid2_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid3;
+			data->required_sclk = vbios->mid3_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid4;
+			data->required_sclk = vbios->mid4_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid5;
+			data->required_sclk = vbios->mid5_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid6;
+			data->required_sclk = vbios->mid6_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+			sclk_message = bw_def_high;
+			data->sclk_level = s_high;
+			data->required_sclk = vbios->high_sclk;
+		}
+		else {
+			sclk_message = bw_def_exceeded_allowed_maximum_sclk;
+			data->sclk_level = s_high;
+			/*required_sclk = high_sclk*/
+		}
+	}
+	/*dispclk*/
+	/*if dispclk is set to the maximum, ramping is not required.  dispclk required without ramping is less than the dispclk required with ramping.*/
+	/*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
+	/*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
+	/*if that does not happen either, dispclk required is the dispclk required with ramping.*/
+	/*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
+	/*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
+	/*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time.  it applies when the lines in per line out is not 2 or 4.*/
+	/*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
+	/*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
+	/*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
+	/*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
+	/*the scaling limits factor itself is also clamped to at least 1*/
+	/*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
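+	/*as computed below: dispclk without ramping = downspread_factor * max(pixel_rate * scaler_limits_factor, display_pipe_throughput_factor * display_pipe_pixel_throughput),*/
+	/*and dispclk with ramping = dispclk_ramping_factor * max(pixel_rate * scaler_limits_factor, display_pipe_pixel_throughput)*/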
+	data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] == bw_def_graphics) {
+				switch (data->lb_bpc[i]) {
+				case 6:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+					break;
+				case 8:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+					break;
+				case 10:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+					break;
+				default:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+					break;
+				}
+				if (data->use_alpha[i] == 1) {
+					data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+				}
+			}
+			else {
+				switch (data->lb_bpc[i]) {
+				case 6:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+					break;
+				case 8:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+					break;
+				case 10:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+					break;
+				default:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
+					break;
+				}
+			}
+			if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+				data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+			}
+			else {
+				data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+			}
+			data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
+			data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
+			data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
+		}
+	}
+	data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
+	data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
+				data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
+			}
+			if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
+				data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
+			}
+		}
+	}
+	data->total_read_request_bandwidth = bw_int_to_fixed(0);
+	data->total_write_request_bandwidth = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
+			}
+			else {
+				data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
+			}
+		}
+	}
+	data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
+	data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+	data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+	if (data->cpuc_state_change_enable == bw_def_yes) {
+		data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+		data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+	}
+	if (data->cpup_state_change_enable == bw_def_yes) {
+		data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+		data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+	}
+	if (data->nbp_state_change_enable == bw_def_yes) {
+		data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+		data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+	}
+	if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+		data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
+	}
+	else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+		data->dispclk = vbios->high_voltage_max_dispclk;
+	}
+	else {
+		data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
+	}
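+	/*
+	 * Illustration (assumed numbers): with a ramping requirement of 700,
+	 * a non-ramping requirement of 620 and high_voltage_max_dispclk of
+	 * 643, the first test fails, the second passes, and dispclk is
+	 * pinned at the 643 cap: ramping headroom is sacrificed before the
+	 * voltage limit is exceeded.
+	 */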
+	/* required core voltage*/
+	/* the core voltage required is low if sclk, yclk (pclk) and dispclk are within the low limits*/
+	/* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/
+	/* otherwise, the core voltage required is high if the three clocks are within the high limits*/
+	/* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
+	if (pipe_check == bw_def_notok) {
+		voltage = bw_def_na;
+	}
+	else if (mode_check == bw_def_notok) {
+		voltage = bw_def_notok;
+	}
+	else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
+		voltage = bw_def_0_72;
+	}
+	else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
+		voltage = bw_def_0_8;
+	}
+	else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
+		if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
+			voltage = bw_def_high_no_nbp_state_change;
+		}
+		else {
+			voltage = bw_def_0_9;
+		}
+	}
+	else {
+		voltage = bw_def_notok;
+	}
+	if (voltage == bw_def_0_72) {
+		data->max_phyclk = vbios->low_voltage_max_phyclk;
+	}
+	else if (voltage == bw_def_0_8) {
+		data->max_phyclk = vbios->mid_voltage_max_phyclk;
+	}
+	else {
+		data->max_phyclk = vbios->high_voltage_max_phyclk;
+	}
+	/*required blackout recovery time*/
+	data->blackout_recovery_time = bw_int_to_fixed(0);
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
+			if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+				data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
+				if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
+					data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+				}
+			}
+			else {
+				data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
+				if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
+					data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+				}
+			}
+		}
+	}
+	/*sclk deep sleep*/
+	/*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but should not be set to less than the request bandwidth.*/
+	/*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
+	/*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
+	/*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
+	/*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
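+	/*
+	 * Illustration (assumed numbers): a 600 dispclk with the 15% margin
+	 * is 690; with a minimum fifo entry of 16 pixels (a 32 bpp graphics
+	 * surface: 64 bytes / 4 bytes per pixel) the deep-sleep sclk floor
+	 * is 690 / 16 = 43.125, unless total_read_request_bandwidth is
+	 * larger.
+	 */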
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+			}
+			else if (surface_type[i] == bw_def_graphics) {
+				data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+			else if (data->orthogonal_rotation[i] == 0) {
+				data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+			}
+			else {
+				data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+		}
+	}
+	data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
+				data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
+			}
+		}
+	}
+	data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
+	/*urgent, stutter and nb-p_state watermark*/
+	/*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
+	/*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.  it does not apply to the writeback.*/
+	/*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
+	/*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
+	/*blackout_duration is added to the urgent watermark*/
+	data->chunk_request_time = bw_int_to_fixed(0);
+	data->cursor_request_time = bw_int_to_fixed(0);
+	/*compute total time to request one chunk from each active display pipe*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
+		}
+	}
+	/*compute total time to request cursor data*/
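+	/*
+	 * The 32 * sclk divisor appears to model a 32-byte return per sclk
+	 * cycle, the same sclk-side bandwidth term used for
+	 * stutter_burst_time further down, so this is simply cursor bytes
+	 * over sclk bandwidth.
+	 */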
+	data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+				data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+				data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+				/*unconditionally remove blackout time from the nb p_state watermark*/
+				if ((data->display_pstate_change_enable[i] == 1)) {
+					data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+				}
+				else {
+					/*maximize the watermark to force the switch in the v_blank region of the frame*/
+					data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+				}
+			}
+			else {
+				data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+				data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
+				data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
+				if ((data->display_pstate_change_enable[i] == 1)) {
+					data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+				}
+				else {
+					/*maximize the watermark to force the switch in the v_blank region of the frame*/
+					data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+				}
+			}
+		}
+	}
+	/*stutter mode enable*/
+	/*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
+	/*display pipe.*/
+	data->stutter_mode_enable = data->cpuc_state_change_enable;
+	if (data->number_of_displays > 1) {
+		for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+			if (data->enable[i]) {
+				if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
+					data->stutter_mode_enable = bw_def_no;
+				}
+			}
+		}
+	}
+	/*performance metrics*/
+	/* display read access efficiency (%)*/
+	/* display write back access efficiency (%)*/
+	/* stutter efficiency (%)*/
+	/* extra underlay pitch recommended for efficiency (pixels)*/
+	/* immediate flip time (us)*/
+	/* latency for other clients due to urgent display read (us)*/
+	/* latency for other clients due to urgent display write (us)*/
+	/* average bandwidth consumed by display (no compression) (gb/s)*/
+	/* required dram  bandwidth (gb/s)*/
+	/* required sclk (m_hz)*/
+	/* required rd urgent latency (us)*/
+	/* nb p-state change margin (us)*/
+	/*dmif and mcifwr dram access efficiency*/
+	/*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time.  but it cannot exceed the dram efficiency provided by the memory subsystem*/
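+	/*
+	 * Illustration (assumed numbers): if the reads need 0.2 us of ideal
+	 * dram time (data / bandwidth) but page close-open activity
+	 * stretches the access to 0.4 us, the efficiency is 0.5; bw_min2()
+	 * keeps the ratio from ever reporting better than ideal (1.0).
+	 */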
+	data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
+	if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
+		data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
+	}
+	else {
+		data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
+	}
+	/*average bandwidth*/
+	/*the average bandwidth with no compression is, over the vertical active time, the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
+	/*the average bandwidth with compression is the same, divided by the compression ratio*/
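+	/*
+	 * Illustration (assumed numbers): 1920 pixels at 4 bytes per pixel
+	 * is 7680 bytes per line; with h_total 2200 at a 148.5 pixel rate
+	 * the line time is ~14.8 us, so the uncompressed average is ~518
+	 * bytes/us before the vsr and request-size corrections, and the
+	 * compressed figure divides that by the compression rate.
+	 */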
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+			data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+		}
+	}
+	data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+	data->total_average_bandwidth = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+			data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+		}
+	}
+	/*stutter efficiency*/
+	/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration.  only applies if the display write-back is not enabled.*/
+	/*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
+	/*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
+	/*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
+	/*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
+	/*compute the time to read all the data from the dmif buffer to the lb (dram refresh period)*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
+			data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
+		}
+	}
+	data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
+	data->total_stutter_dmif_buffer_size = 0;
+	data->total_bytes_requested = 0;
+	data->min_stutter_dmif_buffer_size = 9999;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
+				data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
+				data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
+				data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
+			}
+			data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
+		}
+	}
+	data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+	data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
+	data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
+	data->time_in_self_refresh = data->min_stutter_refresh_duration;
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->stutter_efficiency = bw_int_to_fixed(0);
+	}
+	else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
+		data->stutter_efficiency = bw_int_to_fixed(0);
+	}
+	else {
+		/*compute stutter efficiency assuming 60 hz refresh rate*/
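+		/*
+		 * bw_frc_to_fixed(166666667, 10000) below is the 60 hz frame
+		 * period expressed in microseconds (16666.6667 us); the
+		 * efficiency is the percentage of that frame not spent on
+		 * self-refresh exit latency and data bursts.
+		 */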
+		data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
+	}
+	/*immediate flip time*/
+	/*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
+	/*otherwise, it may take just one urgent memory trip*/
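+	/*
+	 * Illustration (assumed numbers): a pte row of 128 requests with a
+	 * per-trip limit of 64 needs ceil(128 / 64) = 2 urgent trips, so the
+	 * immediate flip costs two dmif-mc urgent latencies instead of one.
+	 */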
+	data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+			data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
+			if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
+				data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
+			}
+		}
+	}
+	data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
+	/*worst latency for other clients*/
+	/*it is the urgent latency plus the urgent burst time*/
+	data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
+	}
+	else {
+		data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
+	}
+	/*dmif mc urgent latency supported in high sclk and yclk*/
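+	/*
+	 * In effect this solves read_buffer_time >= burst_time +
+	 * trips * latency for the highest sclk/yclk state: whatever buffer
+	 * time remains after the burst, spread across the urgent trips, is
+	 * the largest per-trip latency that can still be absorbed.
+	 */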
+	data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
+	/*dram speed/p-state change margin*/
+	/*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
+	data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+	data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+			data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+		}
+	}
+	/*sclk required vs urgent latency*/
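+	/*
+	 * Each i (1..5) is tried as a candidate urgent latency in
+	 * microseconds: the latency trips are subtracted from the read
+	 * buffer time and, if the remainder still covers the page
+	 * close-open time, the sclk needed to move the read data in the
+	 * remaining time is recorded.
+	 */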
+	for (i = 1; i <= 5; i++) {
+		data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
+		if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
+			data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+		}
+		else {
+			data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
+		}
+	}
+	/*output link bit per pixel supported*/
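+	/*
+	 * Link figures are in mhz: 600 is the hdmi pixel-clock cap (at the
+	 * 24 bpp baseline), and 270/540/810 are the dp hbr/hbr2/hbr3 symbol
+	 * clocks carrying 8 payload bits per lane per symbol across 4
+	 * lanes, so bpp = link throughput / pixel rate.
+	 */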
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		data->output_bpphdmi[k] = bw_def_na;
+		data->output_bppdp4_lane_hbr[k] = bw_def_na;
+		data->output_bppdp4_lane_hbr2[k] = bw_def_na;
+		data->output_bppdp4_lane_hbr3[k] = bw_def_na;
+		if (data->enable[k]) {
+			data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
+			if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
+				data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+			}
+			if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
+				data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+			}
+			if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
+				data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+			}
+		}
+	}
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+	struct bw_calcs_vbios *bw_vbios,
+	enum bw_calcs_version version)
+{
+	struct bw_calcs_dceip dceip = { 0 };
+	struct bw_calcs_vbios vbios = { 0 };
+
+	dceip.version = version;
+
+	switch (version) {
+	case BW_CALCS_VERSION_CARRIZO:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 64;
+		vbios.number_of_dram_channels = 2;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(1600);
+		vbios.mid_yclk = bw_int_to_fixed(1600);
+		vbios.low_yclk = bw_frc_to_fixed(66666, 100);
+		vbios.low_sclk = bw_int_to_fixed(200);
+		vbios.mid1_sclk = bw_int_to_fixed(300);
+		vbios.mid2_sclk = bw_int_to_fixed(300);
+		vbios.mid3_sclk = bw_int_to_fixed(300);
+		vbios.mid4_sclk = bw_int_to_fixed(300);
+		vbios.mid5_sclk = bw_int_to_fixed(300);
+		vbios.mid6_sclk = bw_int_to_fixed(300);
+		vbios.high_sclk = bw_frc_to_fixed(62609, 100);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(50);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 3;
+		dceip.number_of_underlay_pipes = 1;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = false;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 2;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(0);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+		break;
+	case BW_CALCS_VERSION_POLARIS10:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 32;
+		vbios.number_of_dram_channels = 8;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(6000);
+		vbios.mid_yclk = bw_int_to_fixed(3200);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(300);
+		vbios.mid1_sclk = bw_int_to_fixed(400);
+		vbios.mid2_sclk = bw_int_to_fixed(500);
+		vbios.mid3_sclk = bw_int_to_fixed(600);
+		vbios.mid4_sclk = bw_int_to_fixed(700);
+		vbios.mid5_sclk = bw_int_to_fixed(800);
+		vbios.mid6_sclk = bw_int_to_fixed(974);
+		vbios.high_sclk = bw_int_to_fixed(1154);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 6;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	case BW_CALCS_VERSION_POLARIS11:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 32;
+		vbios.number_of_dram_channels = 4;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(6000);
+		vbios.mid_yclk = bw_int_to_fixed(3200);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(300);
+		vbios.mid1_sclk = bw_int_to_fixed(400);
+		vbios.mid2_sclk = bw_int_to_fixed(500);
+		vbios.mid3_sclk = bw_int_to_fixed(600);
+		vbios.mid4_sclk = bw_int_to_fixed(700);
+		vbios.mid5_sclk = bw_int_to_fixed(800);
+		vbios.mid6_sclk = bw_int_to_fixed(974);
+		vbios.high_sclk = bw_int_to_fixed(1154);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 5;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	case BW_CALCS_VERSION_STONEY:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 64;
+		vbios.number_of_dram_channels = 1;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(1866);
+		vbios.mid_yclk = bw_int_to_fixed(1866);
+		vbios.low_yclk = bw_int_to_fixed(1333);
+		vbios.low_sclk = bw_int_to_fixed(200);
+		vbios.mid1_sclk = bw_int_to_fixed(600);
+		vbios.mid2_sclk = bw_int_to_fixed(600);
+		vbios.mid3_sclk = bw_int_to_fixed(600);
+		vbios.mid4_sclk = bw_int_to_fixed(600);
+		vbios.mid5_sclk = bw_int_to_fixed(600);
+		vbios.mid6_sclk = bw_int_to_fixed(600);
+		vbios.high_sclk = bw_int_to_fixed(800);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(50);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 2;
+		dceip.number_of_underlay_pipes = 1;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 2;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(0);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	default:
+		break;
+	}
+	*bw_dceip = dceip;
+	*bw_vbios = vbios;
+}
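+
+/*
+ * Typical usage (a sketch; the dc->bw_dceip / dc->bw_vbios destinations are
+ * illustrative, not a fixed api):
+ *
+ *	bw_calcs_init(&dc->bw_dceip, &dc->bw_vbios, BW_CALCS_VERSION_CARRIZO);
+ *
+ * The tables are typically filled once per asic and then handed unchanged to
+ * bw_calcs() on every validation pass.
+ */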
+
+/**
+ * Compare calculated (required) clocks against the clocks available at
+ * maximum voltage (max Performance Level).
+ */
+static bool is_display_configuration_supported(
+	const struct bw_calcs_vbios *vbios,
+	const struct bw_calcs_output *calcs_output)
+{
+	uint32_t int_max_clk;
+
+	int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk);
+	int_max_clk *= 1000; /* MHz to kHz */
+	if (calcs_output->dispclk_khz > int_max_clk)
+		return false;
+
+	int_max_clk = bw_fixed_to_int(vbios->high_sclk);
+	int_max_clk *= 1000; /* MHz to kHz */
+	if (calcs_output->required_sclk > int_max_clk)
+		return false;
+
+	return true;
+}
+
+static void populate_initial_data(
+	const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
+{
+	int i, j;
+	int num_displays = 0;
+
+	data->underlay_surface_type = bw_def_420;
+	data->panning_and_bezel_adjustment = bw_def_none;
+	data->graphics_lb_bpc = 10;
+	data->underlay_lb_bpc = 8;
+	data->underlay_tiling_mode = bw_def_tiled;
+	data->graphics_tiling_mode = bw_def_tiled;
+	data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
+	data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+
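+	/*
+	 * Surface index convention used below: underlay (bottom pipe)
+	 * surfaces land in the low slots (num_displays * 2 + j) while the
+	 * top/graphics surfaces start at slot 4 (num_displays + 4).
+	 */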
+	/* Pipes with underlay first */
+	for (i = 0; i < pipe_count; i++) {
+		if (!pipe[i].stream || !pipe[i].bottom_pipe)
+			continue;
+
+		ASSERT(pipe[i].surface);
+
+		if (num_displays == 0) {
+			if (!pipe[i].surface->public.visible)
+				data->d0_underlay_mode = bw_def_underlay_only;
+			else
+				data->d0_underlay_mode = bw_def_blend;
+		} else {
+			if (!pipe[i].surface->public.visible)
+				data->d1_underlay_mode = bw_def_underlay_only;
+			else
+				data->d1_underlay_mode = bw_def_blend;
+		}
+
+		data->fbc_en[num_displays + 4] = false;
+		data->lpt_en[num_displays + 4] = false;
+		data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.h_total);
+		data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.v_total);
+		data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->public.timing.pix_clk_khz, 1000);
+		data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.width);
+		data->pitch_in_pixels[num_displays + 4] = bw_int_to_fixed(pipe[i].surface->public.plane_size.grph.surface_pitch);
+		data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.height);
+		data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.h_taps);
+		data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.v_taps);
+		data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.horz.value);
+		data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.vert.value);
+		switch (pipe[i].surface->public.rotation) {
+		case ROTATION_ANGLE_0:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+			break;
+		case ROTATION_ANGLE_90:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+			break;
+		case ROTATION_ANGLE_180:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+			break;
+		case ROTATION_ANGLE_270:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+			break;
+		default:
+			break;
+		}
+		switch (pipe[i].surface->public.format) {
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+		case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+			data->bytes_per_pixel[num_displays + 4] = 2;
+			break;
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+		case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+			data->bytes_per_pixel[num_displays + 4] = 4;
+			break;
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+			data->bytes_per_pixel[num_displays + 4] = 8;
+			break;
+		default:
+			data->bytes_per_pixel[num_displays + 4] = 4;
+			break;
+		}
+		data->interlace_mode[num_displays + 4] = false;
+		data->stereo_mode[num_displays + 4] = bw_def_mono;
+
+		for (j = 0; j < 2; j++) {
+			data->fbc_en[num_displays * 2 + j] = false;
+			data->lpt_en[num_displays * 2 + j] = false;
+
+			data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.viewport.height);
+			data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.viewport.width);
+			data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
+					pipe[i].bottom_pipe->surface->public.plane_size.grph.surface_pitch);
+			data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.taps.h_taps);
+			data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->scl_data.taps.v_taps);
+			data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+					pipe[i].bottom_pipe->scl_data.ratios.horz.value);
+			data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+					pipe[i].bottom_pipe->scl_data.ratios.vert.value);
+			switch (pipe[i].bottom_pipe->surface->public.rotation) {
+			case ROTATION_ANGLE_0:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
+				break;
+			case ROTATION_ANGLE_90:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
+				break;
+			case ROTATION_ANGLE_180:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
+				break;
+			case ROTATION_ANGLE_270:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
+				break;
+			default:
+				break;
+			}
+			data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
+		}
+
+		num_displays++;
+	}
+
+	/* Pipes without underlay after */
+	for (i = 0; i < pipe_count; i++) {
+		if (!pipe[i].stream || pipe[i].bottom_pipe)
+			continue;
+
+		data->fbc_en[num_displays + 4] = false;
+		data->lpt_en[num_displays + 4] = false;
+		data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.h_total);
+		data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.v_total);
+		data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->public.timing.pix_clk_khz, 1000);
+		if (pipe[i].surface) {
+			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.width);
+			data->pitch_in_pixels[num_displays + 4] = bw_int_to_fixed(pipe[i].surface->public.plane_size.grph.surface_pitch);
+			data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.viewport.height);
+			data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.h_taps);
+			data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].scl_data.taps.v_taps);
+			data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.horz.value);
+			data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].scl_data.ratios.vert.value);
+			switch (pipe[i].surface->public.rotation) {
+			case ROTATION_ANGLE_0:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+				break;
+			case ROTATION_ANGLE_90:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+				break;
+			case ROTATION_ANGLE_180:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+				break;
+			case ROTATION_ANGLE_270:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+				break;
+			default:
+				break;
+			}
+			switch (pipe[i].surface->public.format) {
+			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+			case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+				data->bytes_per_pixel[num_displays + 4] = 2;
+				break;
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+			case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+				data->bytes_per_pixel[num_displays + 4] = 4;
+				break;
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+				data->bytes_per_pixel[num_displays + 4] = 8;
+				break;
+			default:
+				data->bytes_per_pixel[num_displays + 4] = 4;
+				break;
+			}
+		} else {
+			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.h_addressable);
+			data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+			data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->public.timing.v_addressable);
+			data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
+			data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
+			data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+			data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+			data->bytes_per_pixel[num_displays + 4] = 4;
+		}
+
+		data->interlace_mode[num_displays + 4] = false;
+		data->stereo_mode[num_displays + 4] = bw_def_mono;
+		num_displays++;
+	}
+
+	data->number_of_displays = num_displays;
+}
+
+/**
+ * bw_calcs() - calculate display bandwidth requirements and watermarks
+ *
+ * Return:
+ *	true -	Display(s) configuration supported.
+ *		In this case 'calcs_output' contains data for HW programming.
+ *	false - Display(s) configuration not supported (not enough bandwidth).
+ */
+bool bw_calcs(struct dc_context *ctx,
+	const struct bw_calcs_dceip *dceip,
+	const struct bw_calcs_vbios *vbios,
+	const struct pipe_ctx pipe[],
+	int pipe_count,
+	struct bw_calcs_output *calcs_output)
+{
+	struct bw_calcs_data *data = dm_alloc(sizeof(struct bw_calcs_data));
+
+	populate_initial_data(pipe, pipe_count, data);
+
+	/* TODO: this should be taken out of calcs output and assigned during timing sync for pplib use */
+	calcs_output->all_displays_in_sync = false;
+
+	if (data->number_of_displays != 0) {
+		uint8_t yclk_lvl, sclk_lvl;
+		struct bw_fixed high_sclk = vbios->high_sclk;
+		struct bw_fixed mid1_sclk = vbios->mid1_sclk;
+		struct bw_fixed mid2_sclk = vbios->mid2_sclk;
+		struct bw_fixed mid3_sclk = vbios->mid3_sclk;
+		struct bw_fixed mid4_sclk = vbios->mid4_sclk;
+		struct bw_fixed mid5_sclk = vbios->mid5_sclk;
+		struct bw_fixed mid6_sclk = vbios->mid6_sclk;
+		struct bw_fixed low_sclk = vbios->low_sclk;
+		struct bw_fixed high_yclk = vbios->high_yclk;
+		struct bw_fixed mid_yclk = vbios->mid_yclk;
+		struct bw_fixed low_yclk = vbios->low_yclk;
+
+		calculate_bandwidth(dceip, vbios, data);
+
+		yclk_lvl = data->y_clk_level;
+		sclk_lvl = data->sclk_level;
+
+		calcs_output->nbp_state_change_enable =
+			data->nbp_state_change_enable;
+		calcs_output->cpuc_state_change_enable =
+				data->cpuc_state_change_enable;
+		calcs_output->cpup_state_change_enable =
+				data->cpup_state_change_enable;
+		calcs_output->stutter_mode_enable =
+				data->stutter_mode_enable;
+		calcs_output->dispclk_khz =
+			bw_fixed_to_int(bw_mul(data->dispclk,
+					bw_int_to_fixed(1000)));
+		calcs_output->blackout_recovery_time_us =
+			bw_fixed_to_int(data->blackout_recovery_time);
+		calcs_output->required_sclk =
+			bw_fixed_to_int(bw_mul(data->required_sclk,
+					bw_int_to_fixed(1000)));
+		calcs_output->required_sclk_deep_sleep =
+			bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
+					bw_int_to_fixed(1000)));
+		if (yclk_lvl == 0)
+			calcs_output->required_yclk = bw_fixed_to_int(
+				bw_mul(low_yclk, bw_int_to_fixed(1000)));
+		else if (yclk_lvl == 1)
+			calcs_output->required_yclk = bw_fixed_to_int(
+				bw_mul(mid_yclk, bw_int_to_fixed(1000)));
+		else
+			calcs_output->required_yclk = bw_fixed_to_int(
+				bw_mul(high_yclk, bw_int_to_fixed(1000)));
+
+		/* units: nanoseconds, 16-bit storage. */
+
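+		/*
+		 * Four watermark sets are filled in: the 'a' marks use the
+		 * vbios clock levels as-is; on non-Carrizo parts the 'b'
+		 * marks re-run calculate_bandwidth() with the low/mid sclk
+		 * levels raised to mid3 and the 'c' marks additionally raise
+		 * low_yclk to mid_yclk; the 'd' marks force every level to
+		 * high on Carrizo and reuse the raised levels otherwise.
+		 * The vbios table is restored once all sets are written.
+		 */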
+		calcs_output->nbp_state_change_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->nbp_state_change_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+							nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->nbp_state_change_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->nbp_state_change_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+		calcs_output->stutter_exit_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_exit_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_exit_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_exit_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+		calcs_output->urgent_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->urgent_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->urgent_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->urgent_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[9], bw_int_to_fixed(1000)));
+
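+		/*
+		 * The vbios table is modified in place (casting away const)
+		 * so calculate_bandwidth() can be re-run at different clock
+		 * levels; the original values are restored further down.
+		 */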
+		if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
+			((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+			calculate_bandwidth(dceip, vbios, data);
+
+			calcs_output->nbp_state_change_wm_ns[0].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[1].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[2].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->nbp_state_change_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->nbp_state_change_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->nbp_state_change_wm_ns[5].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+			calcs_output->stutter_exit_wm_ns[0].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[1].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[2].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->stutter_exit_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->stutter_exit_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->stutter_exit_wm_ns[5].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+			calcs_output->urgent_wm_ns[0].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[1].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[2].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->urgent_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->urgent_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->urgent_wm_ns[5].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[9], bw_int_to_fixed(1000)));
+
+			((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+			((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+			calculate_bandwidth(dceip, vbios, data);
+
+			calcs_output->nbp_state_change_wm_ns[0].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[1].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[2].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->nbp_state_change_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->nbp_state_change_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->nbp_state_change_wm_ns[5].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+			calcs_output->stutter_exit_wm_ns[0].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[1].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[2].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->stutter_exit_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->stutter_exit_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->stutter_exit_wm_ns[5].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+			calcs_output->urgent_wm_ns[0].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[1].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[2].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->urgent_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->urgent_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->urgent_wm_ns[5].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[9], bw_int_to_fixed(1000)));
+		}
+
+		if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
+			((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
+			((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
+			((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
+		} else {
+			((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+			((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+		}
+
+		calculate_bandwidth(dceip, vbios, data);
+
+		calcs_output->nbp_state_change_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->nbp_state_change_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->nbp_state_change_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->nbp_state_change_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+		calcs_output->stutter_exit_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_exit_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_exit_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_exit_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+		calcs_output->urgent_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->urgent_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->urgent_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->urgent_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[9], bw_int_to_fixed(1000)));
+
+		((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
+		((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
+		((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
+		((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
+	} else {
+		calcs_output->nbp_state_change_enable = true;
+		calcs_output->cpuc_state_change_enable = true;
+		calcs_output->cpup_state_change_enable = true;
+		calcs_output->stutter_mode_enable = true;
+		calcs_output->dispclk_khz = 0;
+		calcs_output->required_sclk = 0;
+	}
+
+	dm_free(data);
+
+	return is_display_configuration_supported(vbios, calcs_output);
+}
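
For orientation, a minimal sketch of how a validation path might consume bw_calcs(); the wrapper name is hypothetical, and the dceip/vbios tables are assumed to have been populated for the ASIC elsewhere:

	static bool example_validate_bandwidth(struct dc_context *ctx,
					       const struct bw_calcs_dceip *dceip,
					       const struct bw_calcs_vbios *vbios,
					       const struct pipe_ctx pipes[],
					       int pipe_count,
					       struct bw_calcs_output *out)
	{
		/* Fails when the pipe configuration needs more bandwidth
		 * than the platform can supply. */
		if (!bw_calcs(ctx, dceip, vbios, pipes, pipe_count, out))
			return false;

		/* out->dispclk_khz, out->required_sclk/required_yclk and
		 * the *_wm_ns watermark sets are now ready for HW
		 * programming. */
		return true;
	}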

+ 299 - 0
drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c

@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "bw_fixed.h"
+
+#define BITS_PER_FRACTIONAL_PART 24
+
+#define MIN_I32 \
+	(int64_t)(-(1LL << (63 - BITS_PER_FRACTIONAL_PART)))
+
+#define MAX_I32 \
+	(int64_t)((1ULL << (63 - BITS_PER_FRACTIONAL_PART)) - 1)
+
+#define MIN_I64 \
+	(int64_t)(-(1LL << 63))
+
+#define MAX_I64 \
+	(int64_t)((1ULL << 63) - 1)
+
+#define FRACTIONAL_PART_MASK \
+	((1ULL << BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+	((x) >> BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+	(FRACTIONAL_PART_MASK & (x))
+
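+/*
+ * Layout sketch: with BITS_PER_FRACTIONAL_PART == 24, bw_int_to_fixed(3)
+ * stores 3 << 24 = 0x03000000 and bw_frc_to_fixed(1, 2) stores
+ * 1 << 23 = 0x00800000; bw_fixed_to_int() shifts back, so 3.5 truncates
+ * to 3. MIN_I32/MAX_I32, despite their names, bound the pre-shift
+ * integer at +/-2^39 so the shifted value still fits in an int64_t.
+ */
+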
+static uint64_t abs_i64(int64_t arg)
+{
+	if (arg >= 0)
+		return (uint64_t)(arg);
+	else
+		return (uint64_t)(-arg);
+}
+
+struct bw_fixed bw_min3(struct bw_fixed v1, struct bw_fixed v2, struct bw_fixed v3)
+{
+	return bw_min2(bw_min2(v1, v2), v3);
+}
+
+struct bw_fixed bw_max3(struct bw_fixed v1, struct bw_fixed v2, struct bw_fixed v3)
+{
+	return bw_max2(bw_max2(v1, v2), v3);
+}
+
+struct bw_fixed bw_int_to_fixed(int64_t value)
+{
+	struct bw_fixed res;
+	ASSERT(value < MAX_I32 && value > MIN_I32);
+	res.value = value << BITS_PER_FRACTIONAL_PART;
+	return res;
+}
+
+int32_t bw_fixed_to_int(struct bw_fixed value)
+{
+	return GET_INTEGER_PART(value.value);
+}
+
+struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
+{
+	struct bw_fixed res;
+	bool arg1_negative = numerator < 0;
+	bool arg2_negative = denominator < 0;
+	uint64_t arg1_value;
+	uint64_t arg2_value;
+	uint64_t remainder;
+
+	/* determine integer part */
+	uint64_t res_value;
+
+	ASSERT(denominator != 0);
+
+	arg1_value = abs_i64(numerator);
+	arg2_value = abs_i64(denominator);
+	res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);
+
+	ASSERT(res_value <= MAX_I32);
+
+	/* determine fractional part */
+	{
+		uint32_t i = BITS_PER_FRACTIONAL_PART;
+
+		do {
+			remainder <<= 1;
+			res_value <<= 1;
+
+			if (remainder >= arg2_value) {
+				res_value |= 1;
+				remainder -= arg2_value;
+			}
+		} while (--i != 0);
+	}
+
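+	/*
+	 * Worked example: for bw_frc_to_fixed(1, 3) the loop above emits
+	 * the alternating bit pattern 0101... over the 24 iterations,
+	 * giving 0x555555; the round-to-nearest step below adds nothing,
+	 * since the next bit would be 0.
+	 */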
+	/* round up LSB */
+	{
+		uint64_t summand = (remainder << 1) >= arg2_value;
+
+		ASSERT(res_value <= MAX_I64 - summand);
+
+		res_value += summand;
+	}
+
+	res.value = (int64_t)(res_value);
+
+	if (arg1_negative ^ arg2_negative)
+		res.value = -res.value;
+	return res;
+}
+
+struct bw_fixed bw_min2(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return (arg1.value <= arg2.value) ? arg1 : arg2;
+}
+
+struct bw_fixed bw_max2(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return (arg2.value <= arg1.value) ? arg1 : arg2;
+}
+
+struct bw_fixed bw_floor2(
+	const struct bw_fixed arg,
+	const struct bw_fixed significance)
+{
+	struct bw_fixed result;
+	int64_t multiplicand;
+
+	multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+	result.value = abs_i64(significance.value) * multiplicand;
+	ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
+	return result;
+}
+
+struct bw_fixed bw_ceil2(
+	const struct bw_fixed arg,
+	const struct bw_fixed significance)
+{
+	struct bw_fixed result;
+	int64_t multiplicand;
+
+	multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+	result.value = abs_i64(significance.value) * multiplicand;
+	if (abs_i64(result.value) < abs_i64(arg.value)) {
+		if (arg.value < 0)
+			result.value -= abs_i64(significance.value);
+		else
+			result.value += abs_i64(significance.value);
+	}
+	return result;
+}
+
+struct bw_fixed bw_add(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	struct bw_fixed res;
+
+	res.value = arg1.value + arg2.value;
+
+	return res;
+}
+
+struct bw_fixed bw_sub(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	struct bw_fixed res;
+
+	res.value = arg1.value - arg2.value;
+
+	return res;
+}
+
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	struct bw_fixed res;
+
+	bool arg1_negative = arg1.value < 0;
+	bool arg2_negative = arg2.value < 0;
+
+	uint64_t arg1_value = abs_i64(arg1.value);
+	uint64_t arg2_value = abs_i64(arg2.value);
+
+	uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+	uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+	uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+	uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+	uint64_t tmp;
+
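+	/*
+	 * Schoolbook decomposition: (a_int + a_frc) * (b_int + b_frc).
+	 * The three partial products involving an integer part already
+	 * carry 24 fraction bits; the fraction-by-fraction product carries
+	 * 48 and is shifted back by 24 (with a rounding increment) before
+	 * being accumulated. E.g. 1.5 * 2.5 decomposes as
+	 * 1*2 + 1*0.5 + 2*0.5 + 0.5*0.5 = 3.75.
+	 */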
+	res.value = arg1_int * arg2_int;
+
+	ASSERT(res.value <= MAX_I32);
+
+	res.value <<= BITS_PER_FRACTIONAL_PART;
+
+	tmp = arg1_int * arg2_fra;
+
+	ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+	res.value += tmp;
+
+	tmp = arg2_int * arg1_fra;
+
+	ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+	res.value += tmp;
+
+	tmp = arg1_fra * arg2_fra;
+
+	tmp = (tmp >> BITS_PER_FRACTIONAL_PART) +
+		(tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));
+
+	ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+	res.value += tmp;
+
+	if (arg1_negative ^ arg2_negative)
+		res.value = -res.value;
+	return res;
+}
+
+struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	struct bw_fixed res = bw_frc_to_fixed(arg1.value, arg2.value);
+	return res;
+}
+
+struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	struct bw_fixed res;
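+
+	/* div64_u64_rem() treats both operands as unsigned, so this helper
+	 * is only meaningful for non-negative arguments. */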
+	div64_u64_rem(arg1.value, arg2.value, &res.value);
+	return res;
+}
+
+struct bw_fixed fixed31_32_to_bw_fixed(int64_t raw)
+{
+	struct bw_fixed result = { 0 };
+
+	if (raw < 0) {
+		raw = -raw;
+		result.value = -(raw >> (32 - BITS_PER_FRACTIONAL_PART));
+	} else {
+		result.value = raw >> (32 - BITS_PER_FRACTIONAL_PART);
+	}
+
+	return result;
+}
+
+bool bw_equ(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return arg1.value == arg2.value;
+}
+
+bool bw_neq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return arg1.value != arg2.value;
+}
+
+bool bw_leq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return arg1.value <= arg2.value;
+}
+
+bool bw_meq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return arg1.value >= arg2.value;
+}
+
+bool bw_ltn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return arg1.value < arg2.value;
+}
+
+bool bw_mtn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+	return arg1.value > arg2.value;
+}

+ 1382 - 0
drivers/gpu/drm/amd/display/dc/calcs/gamma_calcs.c

@@ -0,0 +1,1382 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "gamma_calcs.h"
+
+struct curve_config {
+	uint32_t offset;
+	int8_t segments[16];
+	int8_t begin;
+};
+
+static bool build_custom_float(
+	struct fixed31_32 value,
+	const struct custom_float_format *format,
+	bool *negative,
+	uint32_t *mantissa,
+	uint32_t *exponenta)
+{
+	uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+	const struct fixed31_32 mantissa_constant_plus_max_fraction =
+		dal_fixed31_32_from_fraction(
+			(1LL << (format->mantissa_bits + 1)) - 1,
+			1LL << format->mantissa_bits);
+
+	struct fixed31_32 mantiss;
+
+	if (dal_fixed31_32_eq(
+		value,
+		dal_fixed31_32_zero)) {
+		*negative = false;
+		*mantissa = 0;
+		*exponenta = 0;
+		return true;
+	}
+
+	if (dal_fixed31_32_lt(
+		value,
+		dal_fixed31_32_zero)) {
+		*negative = format->sign;
+		value = dal_fixed31_32_neg(value);
+	} else {
+		*negative = false;
+	}
+
+	if (dal_fixed31_32_lt(
+		value,
+		dal_fixed31_32_one)) {
+		uint32_t i = 1;
+
+		do {
+			value = dal_fixed31_32_shl(value, 1);
+			++i;
+		} while (dal_fixed31_32_lt(
+			value,
+			dal_fixed31_32_one));
+
+		--i;
+
+		if (exp_offset <= i) {
+			*mantissa = 0;
+			*exponenta = 0;
+			return true;
+		}
+
+		*exponenta = exp_offset - i;
+	} else if (dal_fixed31_32_le(
+		mantissa_constant_plus_max_fraction,
+		value)) {
+		uint32_t i = 1;
+
+		do {
+			value = dal_fixed31_32_shr(value, 1);
+			++i;
+		} while (dal_fixed31_32_lt(
+			mantissa_constant_plus_max_fraction,
+			value));
+
+		*exponenta = exp_offset + i - 1;
+	} else {
+		*exponenta = exp_offset;
+	}
+
+	mantiss = dal_fixed31_32_sub(
+		value,
+		dal_fixed31_32_one);
+
+	if (dal_fixed31_32_lt(
+			mantiss,
+			dal_fixed31_32_zero) ||
+		dal_fixed31_32_lt(
+			dal_fixed31_32_one,
+			mantiss))
+		mantiss = dal_fixed31_32_zero;
+	else
+		mantiss = dal_fixed31_32_shl(
+			mantiss,
+			format->mantissa_bits);
+
+	*mantissa = dal_fixed31_32_floor(mantiss);
+
+	return true;
+}
+
+static bool setup_custom_float(
+	const struct custom_float_format *format,
+	bool negative,
+	uint32_t mantissa,
+	uint32_t exponenta,
+	uint32_t *result)
+{
+	uint32_t i = 0;
+	uint32_t j = 0;
+
+	uint32_t value = 0;
+
+	/* verification code:
+	 * once the calculation is confirmed correct, this can be removed
+	 */
+
+	const uint32_t mantissa_mask =
+		(1 << (format->mantissa_bits + 1)) - 1;
+
+	const uint32_t exponenta_mask =
+		(1 << (format->exponenta_bits + 1)) - 1;
+
+	if (mantissa & ~mantissa_mask) {
+		BREAK_TO_DEBUGGER();
+		mantissa = mantissa_mask;
+	}
+
+	if (exponenta & ~exponenta_mask) {
+		BREAK_TO_DEBUGGER();
+		exponenta = exponenta_mask;
+	}
+
+	/* end of verification code */
+
+	while (i < format->mantissa_bits) {
+		uint32_t mask = 1 << i;
+
+		if (mantissa & mask)
+			value |= mask;
+
+		++i;
+	}
+
+	while (j < format->exponenta_bits) {
+		uint32_t mask = 1 << j;
+
+		if (exponenta & mask)
+			value |= mask << i;
+
+		++j;
+	}
+
+	if (negative && format->sign)
+		value |= 1 << (i + j);
+
+	*result = value;
+
+	return true;
+}
+
+static bool convert_to_custom_float_format_ex(
+	struct fixed31_32 value,
+	const struct custom_float_format *format,
+	struct custom_float_value *result)
+{
+	return build_custom_float(
+		value, format,
+		&result->negative, &result->mantissa, &result->exponenta) &&
+	setup_custom_float(
+		format, result->negative, result->mantissa, result->exponenta,
+		&result->value);
+}
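+
+/*
+ * Packing example: 0.75 normalizes to 1.5 * 2^-1, so with
+ * exponenta_bits == 6 (bias 31) and mantissa_bits == 12 the helpers
+ * above produce exponenta == 30 and mantissa == 0x800 (the fractional
+ * 0.5); the packed register value is (30 << 12) | 0x800 with the sign
+ * bit clear.
+ */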
+
+static bool round_custom_float_6_12(
+	struct hw_x_point *x)
+{
+	struct custom_float_format fmt;
+
+	struct custom_float_value value;
+
+	fmt.exponenta_bits = 6;
+	fmt.mantissa_bits = 12;
+	fmt.sign = true;
+
+	if (!convert_to_custom_float_format_ex(
+		x->x, &fmt, &value))
+		return false;
+
+	x->adjusted_x = x->x;
+
+	if (value.mantissa) {
+		BREAK_TO_DEBUGGER();
+
+		return false;
+	}
+
+	return true;
+}
+
+static bool build_hw_curve_configuration(
+	const struct curve_config *curve_config,
+	struct gamma_curve *gamma_curve,
+	struct curve_points *curve_points,
+	struct hw_x_point *points,
+	uint32_t *number_of_points)
+{
+	const int8_t max_regions_number = ARRAY_SIZE(curve_config->segments);
+
+	int8_t i;
+
+	uint8_t segments_calculation[8] = { 0 };
+
+	struct fixed31_32 region1 = dal_fixed31_32_zero;
+	struct fixed31_32 region2;
+	struct fixed31_32 increment;
+
+	uint32_t index = 0;
+	uint32_t segments = 0;
+	uint32_t max_number;
+
+	bool result = false;
+
+	if (!number_of_points) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	max_number = *number_of_points;
+
+	i = 0;
+
+	while (i != max_regions_number) {
+		gamma_curve[i].offset = 0;
+		gamma_curve[i].segments_num = 0;
+
+		++i;
+	}
+
+	i = 0;
+
+	while (i != max_regions_number) {
+		/* segment numbers must form an uninterrupted sequence */
+		if (curve_config->segments[i] == -1)
+			break;
+
+		ASSERT(curve_config->segments[i] >= 0);
+
+		segments += (1 << curve_config->segments[i]);
+
+		++i;
+	}
+
+	if (segments > max_number) {
+		BREAK_TO_DEBUGGER();
+	} else {
+		int32_t divisor;
+		uint32_t offset = 0;
+		int8_t begin = curve_config->begin;
+		int32_t region_number = 0;
+
+		i = begin;
+
+		while ((index < max_number) &&
+			(region_number < max_regions_number) &&
+			(i <= 1)) {
+			int32_t j = 0;
+
+			segments = curve_config->segments[region_number];
+			divisor = 1 << segments;
+
+			if (segments == -1) {
+				if (i > 0) {
+					region1 = dal_fixed31_32_shl(
+						dal_fixed31_32_one,
+						i - 1);
+					region2 = dal_fixed31_32_shl(
+						dal_fixed31_32_one,
+						i);
+				} else {
+					region1 = dal_fixed31_32_shr(
+						dal_fixed31_32_one,
+						-(i - 1));
+					region2 = dal_fixed31_32_shr(
+						dal_fixed31_32_one,
+						-i);
+				}
+
+				break;
+			}
+
+			if (i > -1) {
+				region1 = dal_fixed31_32_shl(
+					dal_fixed31_32_one,
+					i);
+				region2 = dal_fixed31_32_shl(
+					dal_fixed31_32_one,
+					i + 1);
+			} else {
+				region1 = dal_fixed31_32_shr(
+					dal_fixed31_32_one,
+					-i);
+				region2 = dal_fixed31_32_shr(
+					dal_fixed31_32_one,
+					-(i + 1));
+			}
+
+			gamma_curve[region_number].offset = offset;
+			gamma_curve[region_number].segments_num = segments;
+
+			offset += divisor;
+
+			++segments_calculation[segments];
+
+			increment = dal_fixed31_32_div_int(
+				dal_fixed31_32_sub(
+					region2,
+					region1),
+				divisor);
+
+			points[index].x = region1;
+
+			round_custom_float_6_12(points + index);
+
+			++index;
+			++region_number;
+
+			while ((index < max_number) && (j < divisor - 1)) {
+				region1 = dal_fixed31_32_add(
+					region1,
+					increment);
+
+				points[index].x = region1;
+				points[index].adjusted_x = region1;
+
+				++index;
+				++j;
+			}
+
+			++i;
+		}
+
+		points[index].x = region1;
+
+		round_custom_float_6_12(points + index);
+
+		*number_of_points = index;
+
+		result = true;
+	}
+
+	curve_points[0].x = points[0].adjusted_x;
+	curve_points[0].offset = dal_fixed31_32_zero;
+
+	curve_points[1].x = points[index - 1].adjusted_x;
+	curve_points[1].offset = dal_fixed31_32_zero;
+
+	curve_points[2].x = points[index].adjusted_x;
+	curve_points[2].offset = dal_fixed31_32_zero;
+
+	return result;
+}
+
+static bool setup_distribution_points(
+		struct gamma_curve *arr_curve_points,
+		struct curve_points *arr_points,
+		uint32_t *hw_points_num,
+		struct hw_x_point *coordinates_x)
+{
+	struct curve_config cfg;
+
+	cfg.offset = 0;
+	cfg.segments[0] = 3;
+	cfg.segments[1] = 4;
+	cfg.segments[2] = 4;
+	cfg.segments[3] = 4;
+	cfg.segments[4] = 4;
+	cfg.segments[5] = 4;
+	cfg.segments[6] = 4;
+	cfg.segments[7] = 4;
+	cfg.segments[8] = 5;
+	cfg.segments[9] = 5;
+	cfg.segments[10] = 0;
+	cfg.segments[11] = -1;
+	cfg.segments[12] = -1;
+	cfg.segments[13] = -1;
+	cfg.segments[14] = -1;
+	cfg.segments[15] = -1;
+
+	cfg.begin = -10;
+
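+	/*
+	 * This configuration yields 2^3 + 7 * 2^4 + 2 * 2^5 + 2^0 = 185
+	 * points, distributed exponentially from 2^-10 upward, plus one
+	 * end point; hw_points_num comes back as 185.
+	 */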
+	if (!build_hw_curve_configuration(
+		&cfg, arr_curve_points,
+		arr_points,
+		coordinates_x, hw_points_num)) {
+		ASSERT_CRITICAL(false);
+		return false;
+	}
+	return true;
+}
+
+struct dividers {
+	struct fixed31_32 divider1;
+	struct fixed31_32 divider2;
+	struct fixed31_32 divider3;
+};
+
+static void build_regamma_coefficients(struct gamma_coefficients *coefficients)
+{
+	/* sRGB should apply 2.4 */
+	static const int32_t numerator01[3] = { 31308, 31308, 31308 };
+	static const int32_t numerator02[3] = { 12920, 12920, 12920 };
+	static const int32_t numerator03[3] = { 55, 55, 55 };
+	static const int32_t numerator04[3] = { 55, 55, 55 };
+	static const int32_t numerator05[3] = { 2400, 2400, 2400 };
+
+	const int32_t *numerator1;
+	const int32_t *numerator2;
+	const int32_t *numerator3;
+	const int32_t *numerator4;
+	const int32_t *numerator5;
+
+	uint32_t i = 0;
+
+	numerator1 = numerator01;
+	numerator2 = numerator02;
+	numerator3 = numerator03;
+	numerator4 = numerator04;
+	numerator5 = numerator05;
+
+	do {
+		coefficients->a0[i] = dal_fixed31_32_from_fraction(
+			numerator1[i], 10000000);
+		coefficients->a1[i] = dal_fixed31_32_from_fraction(
+			numerator2[i], 1000);
+		coefficients->a2[i] = dal_fixed31_32_from_fraction(
+			numerator3[i], 1000);
+		coefficients->a3[i] = dal_fixed31_32_from_fraction(
+			numerator4[i], 1000);
+		coefficients->user_gamma[i] = dal_fixed31_32_from_fraction(
+			numerator5[i], 1000);
+
+		++i;
+	} while (i != ARRAY_SIZE(coefficients->a0));
+}
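+
+/*
+ * With these coefficients (a0 = 0.0031308, a1 = 12.92, a2 = a3 = 0.055,
+ * user_gamma = 2.4), translate_from_linear_space() below evaluates the
+ * standard sRGB transfer function: (1 + a3) * x^(1/2.4) - a2 for x >= a0,
+ * a1 * x for |x| < a0, and the odd mirror a2 - (1 + a3) * (-x)^(1/2.4)
+ * for x <= -a0.
+ */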
+
+static struct fixed31_32 translate_from_linear_space(
+	struct fixed31_32 arg,
+	struct fixed31_32 a0,
+	struct fixed31_32 a1,
+	struct fixed31_32 a2,
+	struct fixed31_32 a3,
+	struct fixed31_32 gamma)
+{
+	const struct fixed31_32 one = dal_fixed31_32_from_int(1);
+
+	if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
+		return dal_fixed31_32_sub(
+			a2,
+			dal_fixed31_32_mul(
+				dal_fixed31_32_add(
+					one,
+					a3),
+				dal_fixed31_32_pow(
+					dal_fixed31_32_neg(arg),
+					dal_fixed31_32_recip(gamma))));
+	else if (dal_fixed31_32_le(a0, arg))
+		return dal_fixed31_32_sub(
+			dal_fixed31_32_mul(
+				dal_fixed31_32_add(
+					one,
+					a3),
+				dal_fixed31_32_pow(
+					arg,
+					dal_fixed31_32_recip(gamma))),
+			a2);
+	else
+		return dal_fixed31_32_mul(
+			arg,
+			a1);
+}
+
+static inline struct fixed31_32 translate_from_linear_space_ex(
+	struct fixed31_32 arg,
+	struct gamma_coefficients *coeff,
+	uint32_t color_index)
+{
+	return translate_from_linear_space(
+		arg,
+		coeff->a0[color_index],
+		coeff->a1[color_index],
+		coeff->a2[color_index],
+		coeff->a3[color_index],
+		coeff->user_gamma[color_index]);
+}
+
+static bool find_software_points(
+	const struct gamma_pixel *axis_x_256,
+	struct fixed31_32 hw_point,
+	enum channel_name channel,
+	uint32_t *index_to_start,
+	uint32_t *index_left,
+	uint32_t *index_right,
+	enum hw_point_position *pos)
+{
+	const uint32_t max_number = RGB_256X3X16 + 3;
+
+	struct fixed31_32 left, right;
+
+	uint32_t i = *index_to_start;
+
+	while (i < max_number) {
+		if (channel == CHANNEL_NAME_RED) {
+			left = axis_x_256[i].r;
+
+			if (i < max_number - 1)
+				right = axis_x_256[i + 1].r;
+			else
+				right = axis_x_256[max_number - 1].r;
+		} else if (channel == CHANNEL_NAME_GREEN) {
+			left = axis_x_256[i].g;
+
+			if (i < max_number - 1)
+				right = axis_x_256[i + 1].g;
+			else
+				right = axis_x_256[max_number - 1].g;
+		} else {
+			left = axis_x_256[i].b;
+
+			if (i < max_number - 1)
+				right = axis_x_256[i + 1].b;
+			else
+				right = axis_x_256[max_number - 1].b;
+		}
+
+		if (dal_fixed31_32_le(left, hw_point) &&
+			dal_fixed31_32_le(hw_point, right)) {
+			*index_to_start = i;
+			*index_left = i;
+
+			if (i < max_number - 1)
+				*index_right = i + 1;
+			else
+				*index_right = max_number - 1;
+
+			*pos = HW_POINT_POSITION_MIDDLE;
+
+			return true;
+		} else if ((i == *index_to_start) &&
+			dal_fixed31_32_le(hw_point, left)) {
+			*index_to_start = i;
+			*index_left = i;
+			*index_right = i;
+
+			*pos = HW_POINT_POSITION_LEFT;
+
+			return true;
+		} else if ((i == max_number - 1) &&
+			dal_fixed31_32_le(right, hw_point)) {
+			*index_to_start = i;
+			*index_left = i;
+			*index_right = i;
+
+			*pos = HW_POINT_POSITION_RIGHT;
+
+			return true;
+		}
+
+		++i;
+	}
+
+	return false;
+}
+
+static bool build_custom_gamma_mapping_coefficients_worker(
+	struct pixel_gamma_point *coeff,
+	const struct hw_x_point *coordinates_x,
+	const struct gamma_pixel *axis_x_256,
+	enum channel_name channel,
+	uint32_t number_of_points,
+	enum surface_pixel_format pixel_format)
+{
+	uint32_t i = 0;
+
+	while (i <= number_of_points) {
+		struct fixed31_32 coord_x;
+
+		uint32_t index_to_start = 0;
+		uint32_t index_left = 0;
+		uint32_t index_right = 0;
+
+		enum hw_point_position hw_pos;
+
+		struct gamma_point *point;
+
+		struct fixed31_32 left_pos;
+		struct fixed31_32 right_pos;
+
+		/*
+		 * TODO: confirm enum in surface_pixel_format
+		 * if (pixel_format == PIXEL_FORMAT_FP16)
+		 *	coord_x = coordinates_x[i].adjusted_x;
+		 * else
+		 */
+		if (channel == CHANNEL_NAME_RED)
+			coord_x = coordinates_x[i].regamma_y_red;
+		else if (channel == CHANNEL_NAME_GREEN)
+			coord_x = coordinates_x[i].regamma_y_green;
+		else
+			coord_x = coordinates_x[i].regamma_y_blue;
+
+		if (!find_software_points(
+			axis_x_256, coord_x, channel,
+			&index_to_start, &index_left, &index_right, &hw_pos)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (index_left >= RGB_256X3X16 + 3) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (index_right >= RGB_256X3X16 + 3) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (channel == CHANNEL_NAME_RED) {
+			point = &coeff[i].r;
+
+			left_pos = axis_x_256[index_left].r;
+			right_pos = axis_x_256[index_right].r;
+		} else if (channel == CHANNEL_NAME_GREEN) {
+			point = &coeff[i].g;
+
+			left_pos = axis_x_256[index_left].g;
+			right_pos = axis_x_256[index_right].g;
+		} else {
+			point = &coeff[i].b;
+
+			left_pos = axis_x_256[index_left].b;
+			right_pos = axis_x_256[index_right].b;
+		}
+
+		if (hw_pos == HW_POINT_POSITION_MIDDLE)
+			point->coeff = dal_fixed31_32_div(
+				dal_fixed31_32_sub(
+					coord_x,
+					left_pos),
+				dal_fixed31_32_sub(
+					right_pos,
+					left_pos));
+		else if (hw_pos == HW_POINT_POSITION_LEFT)
+			point->coeff = dal_fixed31_32_zero;
+		else if (hw_pos == HW_POINT_POSITION_RIGHT)
+			point->coeff = dal_fixed31_32_from_int(2);
+		else {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		point->left_index = index_left;
+		point->right_index = index_right;
+		point->pos = hw_pos;
+
+		++i;
+	}
+
+	return true;
+}
+
+static inline bool build_oem_custom_gamma_mapping_coefficients(
+	struct pixel_gamma_point *coeff128_oem,
+	const struct hw_x_point *coordinates_x,
+	const struct gamma_pixel *axis_x_256,
+	uint32_t number_of_points,
+	enum surface_pixel_format pixel_format)
+{
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		if (!build_custom_gamma_mapping_coefficients_worker(
+				coeff128_oem, coordinates_x, axis_x_256, i,
+				number_of_points, pixel_format))
+			return false;
+	}
+	return true;
+}
+
+static struct fixed31_32 calculate_mapped_value(
+	struct pwl_float_data *rgb,
+	const struct pixel_gamma_point *coeff,
+	enum channel_name channel,
+	uint32_t max_index)
+{
+	const struct gamma_point *point;
+
+	struct fixed31_32 result;
+
+	if (channel == CHANNEL_NAME_RED)
+		point = &coeff->r;
+	else if (channel == CHANNEL_NAME_GREEN)
+		point = &coeff->g;
+	else
+		point = &coeff->b;
+
+	if ((point->left_index < 0) || (point->left_index > max_index)) {
+		BREAK_TO_DEBUGGER();
+		return dal_fixed31_32_zero;
+	}
+
+	if ((point->right_index < 0) || (point->right_index > max_index)) {
+		BREAK_TO_DEBUGGER();
+		return dal_fixed31_32_zero;
+	}
+
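+	/*
+	 * Middle points are linearly interpolated:
+	 * y = y_left + coeff * (y_right - y_left), where coeff was
+	 * computed as (x - x_left) / (x_right - x_left) when the mapping
+	 * coefficients were built.
+	 */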
+	if (point->pos == HW_POINT_POSITION_MIDDLE)
+		if (channel == CHANNEL_NAME_RED)
+			result = dal_fixed31_32_add(
+				dal_fixed31_32_mul(
+					point->coeff,
+					dal_fixed31_32_sub(
+						rgb[point->right_index].r,
+						rgb[point->left_index].r)),
+				rgb[point->left_index].r);
+		else if (channel == CHANNEL_NAME_GREEN)
+			result = dal_fixed31_32_add(
+				dal_fixed31_32_mul(
+					point->coeff,
+					dal_fixed31_32_sub(
+						rgb[point->right_index].g,
+						rgb[point->left_index].g)),
+				rgb[point->left_index].g);
+		else
+			result = dal_fixed31_32_add(
+				dal_fixed31_32_mul(
+					point->coeff,
+					dal_fixed31_32_sub(
+						rgb[point->right_index].b,
+						rgb[point->left_index].b)),
+				rgb[point->left_index].b);
+	else if (point->pos == HW_POINT_POSITION_LEFT) {
+		BREAK_TO_DEBUGGER();
+		result = dal_fixed31_32_zero;
+	} else {
+		BREAK_TO_DEBUGGER();
+		result = dal_fixed31_32_one;
+	}
+
+	return result;
+}
+
+static inline struct fixed31_32 calculate_oem_mapped_value(
+	struct pwl_float_data *rgb_oem,
+	const struct pixel_gamma_point *coeff,
+	uint32_t index,
+	enum channel_name channel,
+	uint32_t max_index)
+{
+	return calculate_mapped_value(
+			rgb_oem,
+			coeff + index,
+			channel,
+			max_index);
+}
+
+static void build_regamma_curve(struct pwl_float_data_ex *rgb_regamma,
+		struct pwl_float_data *rgb_oem,
+		struct pixel_gamma_point *coeff128_oem,
+		const struct core_gamma *ramp,
+		const struct core_surface *surface,
+		uint32_t hw_points_num,
+		const struct hw_x_point *coordinate_x,
+		const struct gamma_pixel *axis_x,
+		struct dividers dividers)
+{
+	uint32_t i;
+
+	struct gamma_coefficients coeff;
+	struct pwl_float_data_ex *rgb = rgb_regamma;
+	const struct hw_x_point *coord_x = coordinate_x;
+
+	build_regamma_coefficients(&coeff);
+
+	/* Use opp110->regamma.coordinates_x to retrieve the
+	 * coordinates chosen based on the given user curve (future task).
+	 * The x values are exponentially distributed and currently
+	 * hard-coded; the user curve shape is ignored.
+	 * The future task is to recalculate opp110->
+	 * regamma.coordinates_x based on the input/user curve,
+	 * translating from 256/1025 to 128 pwl points.
+	 */
+
+	i = 0;
+
+	while (i != hw_points_num + 1) {
+		rgb->r = translate_from_linear_space_ex(
+			coord_x->adjusted_x, &coeff, 0);
+		rgb->g = translate_from_linear_space_ex(
+			coord_x->adjusted_x, &coeff, 1);
+		rgb->b = translate_from_linear_space_ex(
+			coord_x->adjusted_x, &coeff, 2);
+
+		++coord_x;
+		++rgb;
+		++i;
+	}
+}
+
+static bool scale_gamma(struct pwl_float_data *pwl_rgb,
+		const struct core_gamma *ramp,
+		struct dividers dividers)
+{
+	const struct dc_gamma_ramp_rgb256x3x16 *gamma;
+	const uint16_t max_driver = 0xFFFF;
+	const uint16_t max_os = 0xFF00;
+	uint16_t scaler = max_os;
+	uint32_t i;
+	struct pwl_float_data *rgb = pwl_rgb;
+	struct pwl_float_data *rgb_last = rgb + RGB_256X3X16 - 1;
+
+	if (ramp->public.type == GAMMA_RAMP_RBG256X3X16)
+		gamma = &ramp->public.gamma_ramp_rgb256x3x16;
+	else
+		return false; /* invalid option */
+
+	i = 0;
+
+	do {
+		if ((gamma->red[i] > max_os) ||
+			(gamma->green[i] > max_os) ||
+			(gamma->blue[i] > max_os)) {
+			scaler = max_driver;
+			break;
+		}
+		++i;
+	} while (i != RGB_256X3X16);
+
+	i = 0;
+
+	do {
+		rgb->r = dal_fixed31_32_from_fraction(
+			gamma->red[i], scaler);
+		rgb->g = dal_fixed31_32_from_fraction(
+			gamma->green[i], scaler);
+		rgb->b = dal_fixed31_32_from_fraction(
+			gamma->blue[i], scaler);
+
+		++rgb;
+		++i;
+	} while (i != RGB_256X3X16);
+
+	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+			dividers.divider1);
+	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+			dividers.divider1);
+	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+			dividers.divider1);
+
+	++rgb;
+
+	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+			dividers.divider2);
+	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+			dividers.divider2);
+	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+			dividers.divider2);
+
+	++rgb;
+
+	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+			dividers.divider3);
+	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+			dividers.divider3);
+	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+			dividers.divider3);
+
+	return true;
+}
+
+static void build_evenly_distributed_points(
+	struct gamma_pixel *points,
+	uint32_t numberof_points,
+	struct fixed31_32 max_value,
+	struct dividers dividers)
+{
+	struct gamma_pixel *p = points;
+	struct gamma_pixel *p_last = p + numberof_points - 1;
+
+	uint32_t i = 0;
+
+	do {
+		struct fixed31_32 value = dal_fixed31_32_div_int(
+			dal_fixed31_32_mul_int(max_value, i),
+			numberof_points - 1);
+
+		p->r = value;
+		p->g = value;
+		p->b = value;
+
+		++p;
+		++i;
+	} while (i != numberof_points);
+
+	p->r = dal_fixed31_32_div(p_last->r, dividers.divider1);
+	p->g = dal_fixed31_32_div(p_last->g, dividers.divider1);
+	p->b = dal_fixed31_32_div(p_last->b, dividers.divider1);
+
+	++p;
+
+	p->r = dal_fixed31_32_div(p_last->r, dividers.divider2);
+	p->g = dal_fixed31_32_div(p_last->g, dividers.divider2);
+	p->b = dal_fixed31_32_div(p_last->b, dividers.divider2);
+
+	++p;
+
+	p->r = dal_fixed31_32_div(p_last->r, dividers.divider3);
+	p->g = dal_fixed31_32_div(p_last->g, dividers.divider3);
+	p->b = dal_fixed31_32_div(p_last->b, dividers.divider3);
+}
+
+static inline void copy_rgb_regamma_to_coordinates_x(
+		struct hw_x_point *coordinates_x,
+		uint32_t hw_points_num,
+		const struct pwl_float_data_ex *rgb_ex)
+{
+	struct hw_x_point *coords = coordinates_x;
+	uint32_t i = 0;
+	const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
+
+	while (i <= hw_points_num) {
+		coords->regamma_y_red = rgb_regamma->r;
+		coords->regamma_y_green = rgb_regamma->g;
+		coords->regamma_y_blue = rgb_regamma->b;
+
+		++coords;
+		++rgb_regamma;
+		++i;
+	}
+}
+
+static bool calculate_interpolated_hardware_curve(
+	struct pwl_result_data *rgb,
+	struct pixel_gamma_point *coeff128,
+	struct pwl_float_data *rgb_user,
+	const struct hw_x_point *coordinates_x,
+	const struct gamma_pixel *axis_x_256,
+	uint32_t number_of_points,
+	enum surface_pixel_format pixel_format)
+{
+	const struct pixel_gamma_point *coeff;
+	struct pixel_gamma_point *coeff_128 = coeff128;
+	uint32_t max_entries = 3 - 1;
+	struct pwl_result_data *rgb_resulted = rgb;
+
+	uint32_t i = 0;
+
+	if (!build_oem_custom_gamma_mapping_coefficients(
+			coeff_128, coordinates_x, axis_x_256,
+			number_of_points,
+			pixel_format))
+		return false;
+
+	coeff = coeff128;
+	max_entries += RGB_256X3X16;
+
+	/* TODO: float point case */
+
+	while (i <= number_of_points) {
+		rgb_resulted->red = calculate_mapped_value(
+			rgb_user, coeff, CHANNEL_NAME_RED, max_entries);
+		rgb_resulted->green = calculate_mapped_value(
+			rgb_user, coeff, CHANNEL_NAME_GREEN, max_entries);
+		rgb_resulted->blue = calculate_mapped_value(
+			rgb_user, coeff, CHANNEL_NAME_BLUE, max_entries);
+
+		++coeff;
+		++rgb_resulted;
+		++i;
+	}
+
+	return true;
+}
+
+static bool map_regamma_hw_to_x_user(
+	struct pixel_gamma_point *coeff128,
+	struct pwl_float_data *rgb_oem,
+	struct pwl_result_data *rgb_resulted,
+	struct pwl_float_data *rgb_user,
+	struct hw_x_point *coords_x,
+	const struct gamma_pixel *axis_x,
+	const struct dc_gamma *gamma,
+	const struct pwl_float_data_ex *rgb_regamma,
+	struct dividers dividers,
+	uint32_t hw_points_num,
+	const struct core_surface *surface)
+{
+	/* set up to preserve the calculated ideal regamma values */
+
+	struct pixel_gamma_point *coeff = coeff128;
+
+	struct hw_x_point *coords = coords_x;
+
+	copy_rgb_regamma_to_coordinates_x(coords, hw_points_num, rgb_regamma);
+
+	return calculate_interpolated_hardware_curve(
+			rgb_resulted, coeff, rgb_user, coords, axis_x,
+			hw_points_num, surface->public.format);
+}
+
+static void build_new_custom_resulted_curve(
+	struct pwl_result_data *rgb_resulted,
+	uint32_t hw_points_num)
+{
+	struct pwl_result_data *rgb = rgb_resulted;
+	struct pwl_result_data *rgb_plus_1 = rgb + 1;
+
+	uint32_t i;
+
+	i = 0;
+
+	while (i != hw_points_num + 1) {
+		rgb->red = dal_fixed31_32_clamp(
+			rgb->red, dal_fixed31_32_zero,
+			dal_fixed31_32_one);
+		rgb->green = dal_fixed31_32_clamp(
+			rgb->green, dal_fixed31_32_zero,
+			dal_fixed31_32_one);
+		rgb->blue = dal_fixed31_32_clamp(
+			rgb->blue, dal_fixed31_32_zero,
+			dal_fixed31_32_one);
+
+		++rgb;
+		++i;
+	}
+
+	rgb = rgb_resulted;
+
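+	/*
+	 * Second pass: force the curve to be non-decreasing and record the
+	 * per-segment deltas the PWL hardware steps by.
+	 */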
+	i = 1;
+
+	while (i != hw_points_num + 1) {
+		if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+			rgb_plus_1->red = rgb->red;
+		if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+			rgb_plus_1->green = rgb->green;
+		if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+			rgb_plus_1->blue = rgb->blue;
+
+		rgb->delta_red = dal_fixed31_32_sub(
+			rgb_plus_1->red,
+			rgb->red);
+		rgb->delta_green = dal_fixed31_32_sub(
+			rgb_plus_1->green,
+			rgb->green);
+		rgb->delta_blue = dal_fixed31_32_sub(
+			rgb_plus_1->blue,
+			rgb->blue);
+
+		++rgb_plus_1;
+		++rgb;
+		++i;
+	}
+}
+
+static void rebuild_curve_configuration_magic(
+		struct curve_points *arr_points,
+		struct pwl_result_data *rgb_resulted,
+		const struct hw_x_point *coordinates_x,
+		uint32_t hw_points_num)
+{
+	const struct fixed31_32 magic_number =
+		dal_fixed31_32_from_fraction(249, 1000);
+
+	struct fixed31_32 y_r;
+	struct fixed31_32 y_g;
+	struct fixed31_32 y_b;
+
+	struct fixed31_32 y1_min;
+	struct fixed31_32 y2_max;
+	struct fixed31_32 y3_max;
+
+	y_r = rgb_resulted[0].red;
+	y_g = rgb_resulted[0].green;
+	y_b = rgb_resulted[0].blue;
+
+	y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+	arr_points[0].x = coordinates_x[0].adjusted_x;
+	arr_points[0].y = y1_min;
+	arr_points[0].slope = dal_fixed31_32_div(
+					arr_points[0].y,
+					arr_points[0].x);
+
+	arr_points[1].x = dal_fixed31_32_add(
+			coordinates_x[hw_points_num - 1].adjusted_x,
+			magic_number);
+
+	arr_points[2].x = arr_points[1].x;
+
+	y_r = rgb_resulted[hw_points_num - 1].red;
+	y_g = rgb_resulted[hw_points_num - 1].green;
+	y_b = rgb_resulted[hw_points_num - 1].blue;
+
+	y2_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+	arr_points[1].y = y2_max;
+
+	y_r = rgb_resulted[hw_points_num].red;
+	y_g = rgb_resulted[hw_points_num].green;
+	y_b = rgb_resulted[hw_points_num].blue;
+
+	y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+	arr_points[2].y = y3_max;
+
+	arr_points[2].slope = dal_fixed31_32_one;
+}
+
+static bool convert_to_custom_float_format(
+	struct fixed31_32 value,
+	const struct custom_float_format *format,
+	uint32_t *result)
+{
+	uint32_t mantissa;
+	uint32_t exponenta;
+	bool negative;
+
+	return build_custom_float(
+		value, format, &negative, &mantissa, &exponenta) &&
+	setup_custom_float(
+		format, negative, mantissa, exponenta, result);
+}
+
+static bool convert_to_custom_float(
+		struct pwl_result_data *rgb_resulted,
+		struct curve_points *arr_points,
+		uint32_t hw_points_num)
+{
+	struct custom_float_format fmt;
+
+	struct pwl_result_data *rgb = rgb_resulted;
+
+	uint32_t i = 0;
+
+	fmt.exponenta_bits = 6;
+	fmt.mantissa_bits = 12;
+	fmt.sign = true;
+
+	if (!convert_to_custom_float_format(
+		arr_points[0].x,
+		&fmt,
+		&arr_points[0].custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	if (!convert_to_custom_float_format(
+		arr_points[0].offset,
+		&fmt,
+		&arr_points[0].custom_float_offset)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	if (!convert_to_custom_float_format(
+		arr_points[0].slope,
+		&fmt,
+		&arr_points[0].custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	fmt.mantissa_bits = 10;
+	fmt.sign = false;
+
+	if (!convert_to_custom_float_format(
+		arr_points[1].x,
+		&fmt,
+		&arr_points[1].custom_float_x)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	if (!convert_to_custom_float_format(
+		arr_points[1].y,
+		&fmt,
+		&arr_points[1].custom_float_y)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	if (!convert_to_custom_float_format(
+		arr_points[2].slope,
+		&fmt,
+		&arr_points[2].custom_float_slope)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	fmt.mantissa_bits = 12;
+	fmt.sign = true;
+
+	while (i != hw_points_num) {
+		if (!convert_to_custom_float_format(
+			rgb->red,
+			&fmt,
+			&rgb->red_reg)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (!convert_to_custom_float_format(
+			rgb->green,
+			&fmt,
+			&rgb->green_reg)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (!convert_to_custom_float_format(
+			rgb->blue,
+			&fmt,
+			&rgb->blue_reg)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (!convert_to_custom_float_format(
+			rgb->delta_red,
+			&fmt,
+			&rgb->delta_red_reg)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (!convert_to_custom_float_format(
+			rgb->delta_green,
+			&fmt,
+			&rgb->delta_green_reg)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		if (!convert_to_custom_float_format(
+			rgb->delta_blue,
+			&fmt,
+			&rgb->delta_blue_reg)) {
+			BREAK_TO_DEBUGGER();
+			return false;
+		}
+
+		++rgb;
+		++i;
+	}
+
+	return true;
+}
+
+bool calculate_regamma_params(struct pwl_params *params,
+		const struct core_gamma *ramp,
+		const struct core_surface *surface)
+{
+	struct gamma_curve *arr_curve_points = params->arr_curve_points;
+	struct curve_points *arr_points = params->arr_points;
+	struct pwl_result_data *rgb_resulted = params->rgb_resulted;
+	struct dividers dividers;
+
+	struct hw_x_point *coordinates_x = NULL;
+	struct pwl_float_data *rgb_user = NULL;
+	struct pwl_float_data_ex *rgb_regamma = NULL;
+	struct pwl_float_data *rgb_oem = NULL;
+	struct gamma_pixel *axix_x_256 = NULL;
+	struct pixel_gamma_point *coeff128_oem = NULL;
+	struct pixel_gamma_point *coeff128 = NULL;
+
+	bool ret = false;
+
+	coordinates_x = dm_alloc(sizeof(*coordinates_x)*(256 + 3));
+	if (!coordinates_x)
+		goto coordinates_x_alloc_fail;
+	rgb_user = dm_alloc(sizeof(*rgb_user) * (FLOAT_GAMMA_RAMP_MAX + 3));
+	if (!rgb_user)
+		goto rgb_user_alloc_fail;
+	rgb_regamma = dm_alloc(sizeof(*rgb_regamma) * (256 + 3));
+	if (!rgb_regamma)
+		goto rgb_regamma_alloc_fail;
+	rgb_oem = dm_alloc(sizeof(*rgb_oem) * (FLOAT_GAMMA_RAMP_MAX + 3));
+	if (!rgb_oem)
+		goto rgb_oem_alloc_fail;
+	axix_x_256 = dm_alloc(sizeof(*axix_x_256) * (256 + 3));
+	if (!axix_x_256)
+		goto axix_x_256_alloc_fail;
+	coeff128_oem = dm_alloc(sizeof(*coeff128_oem) * (256 + 3));
+	if (!coeff128_oem)
+		goto coeff128_oem_alloc_fail;
+	coeff128 = dm_alloc(sizeof(*coeff128) * (256 + 3));
+	if (!coeff128)
+		goto coeff128_alloc_fail;
+
+	dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+	dividers.divider2 = dal_fixed31_32_from_int(2);
+	dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+
+	build_evenly_distributed_points(
+			axix_x_256,
+			256,
+			dal_fixed31_32_one,
+			dividers);
+
+	scale_gamma(rgb_user, ramp, dividers);
+
+	setup_distribution_points(arr_curve_points, arr_points,
+			&params->hw_points_num, coordinates_x);
+
+	build_regamma_curve(rgb_regamma, rgb_oem, coeff128_oem,
+			ramp, surface, params->hw_points_num,
+			coordinates_x, axix_x_256, dividers);
+
+	map_regamma_hw_to_x_user(coeff128, rgb_oem, rgb_resulted, rgb_user,
+			coordinates_x, axix_x_256, &ramp->public, rgb_regamma,
+			dividers, params->hw_points_num, surface);
+
+	build_new_custom_resulted_curve(rgb_resulted, params->hw_points_num);
+
+	rebuild_curve_configuration_magic(
+			arr_points,
+			rgb_resulted,
+			coordinates_x,
+			params->hw_points_num);
+
+	convert_to_custom_float(rgb_resulted, arr_points,
+			params->hw_points_num);
+
+	ret = true;
+
+	dm_free(coeff128);
+coeff128_alloc_fail:
+	dm_free(coeff128_oem);
+coeff128_oem_alloc_fail:
+	dm_free(axix_x_256);
+axix_x_256_alloc_fail:
+	dm_free(rgb_oem);
+rgb_oem_alloc_fail:
+	dm_free(rgb_regamma);
+rgb_regamma_alloc_fail:
+	dm_free(rgb_user);
+rgb_user_alloc_fail:
+	dm_free(coordinates_x);
+coordinates_x_alloc_fail:
+	return ret;
+}
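+
+/* The allocation ladder in calculate_regamma_params() is the usual
+ * fall-through cleanup idiom: each dm_alloc() failure jumps to the label
+ * just below its own dm_free(), so only buffers that were successfully
+ * allocated are released, and the success path falls through every free
+ * as well.  Minimal sketch of the same pattern:
+ *
+ *	a = dm_alloc(sizeof(*a));
+ *	if (!a)
+ *		goto a_fail;
+ *	b = dm_alloc(sizeof(*b));
+ *	if (!b)
+ *		goto b_fail;
+ *	ret = true;
+ *	dm_free(b);
+ * b_fail:
+ *	dm_free(a);
+ * a_fail:
+ *	return ret;
+ */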
+

+ 1846 - 0
drivers/gpu/drm/amd/display/dc/core/dc.c

@@ -0,0 +1,1846 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bandwidth_calcs.h"
+#include "bios_parser_interface.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "timing_generator.h"
+#include "virtual/virtual_link_encoder.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+struct dc_target_sync_report {
+	uint32_t h_count;
+	uint32_t v_count;
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destroy_links(struct core_dc *dc)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->link_count; i++) {
+		if (NULL != dc->links[i])
+			link_destroy(&dc->links[i]);
+	}
+}
+
+static bool create_links(
+		struct core_dc *dc,
+		uint32_t num_virtual_links)
+{
+	int i;
+	int connectors_num;
+	struct dc_bios *bios = dc->ctx->dc_bios;
+
+	dc->link_count = 0;
+
+	connectors_num = bios->funcs->get_connectors_number(bios);
+
+	if (connectors_num > ENUM_ID_COUNT) {
+		dm_error(
+			"DC: Number of connectors %d exceeds maximum of %d!\n",
+			connectors_num,
+			ENUM_ID_COUNT);
+		return false;
+	}
+
+	if (connectors_num == 0 && num_virtual_links == 0)
+		dm_error("DC: Number of connectors is zero!\n");
+
+	dm_output_to_console(
+		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
+		__func__,
+		connectors_num,
+		num_virtual_links);
+
+	for (i = 0; i < connectors_num; i++) {
+		struct link_init_data link_init_params = {0};
+		struct core_link *link;
+
+		link_init_params.ctx = dc->ctx;
+		link_init_params.connector_index = i;
+		link_init_params.link_index = dc->link_count;
+		link_init_params.dc = dc;
+		link = link_create(&link_init_params);
+
+		if (link) {
+			dc->links[dc->link_count] = link;
+			link->dc = dc;
+			++dc->link_count;
+		} else {
+			dm_error("DC: failed to create link!\n");
+		}
+	}
+
+	for (i = 0; i < num_virtual_links; i++) {
+		struct core_link *link = dm_alloc(sizeof(*link));
+		struct encoder_init_data enc_init = {0};
+
+		if (link == NULL) {
+			BREAK_TO_DEBUGGER();
+			goto failed_alloc;
+		}
+
+		link->ctx = dc->ctx;
+		link->dc = dc;
+		link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
+		link->link_id.type = OBJECT_TYPE_CONNECTOR;
+		link->link_id.id = CONNECTOR_ID_VIRTUAL;
+		link->link_id.enum_id = ENUM_ID_1;
+		link->link_enc = dm_alloc(sizeof(*link->link_enc));
+		if (!link->link_enc) {
+			dm_free(link);
+			BREAK_TO_DEBUGGER();
+			goto failed_alloc;
+		}
+
+		enc_init.ctx = dc->ctx;
+		enc_init.channel = CHANNEL_ID_UNKNOWN;
+		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+		enc_init.transmitter = TRANSMITTER_UNKNOWN;
+		enc_init.connector = link->link_id;
+		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+		enc_init.encoder.enum_id = ENUM_ID_1;
+		virtual_link_encoder_construct(link->link_enc, &enc_init);
+
+		link->public.link_index = dc->link_count;
+		dc->links[dc->link_count] = link;
+		dc->link_count++;
+	}
+
+	return true;
+
+failed_alloc:
+	return false;
+}
+
+static bool stream_adjust_vmin_vmax(struct dc *dc,
+		const struct dc_stream **stream, int num_streams,
+		int vmin, int vmax)
+{
+	/* TODO: Support multiple streams */
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
+	int i = 0;
+	bool ret = false;
+	struct pipe_ctx *pipes;
+	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == core_stream
+				&& i != underlay_idx) {
+
+			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
+			core_dc->hwss.set_drr(&pipes, 1, vmin, vmax);
+
+			/* build and update the info frame */
+			resource_build_info_frame(pipes);
+			core_dc->hwss.update_info_frame(pipes);
+
+			ret = true;
+		}
+	}
+
+	return ret;
+}
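+
+/* DRR background (illustrative numbers): clamping v_total to [vmin, vmax]
+ * per frame sets the effective refresh rate to
+ *
+ *	refresh_hz = pix_clk_khz * 1000 / (h_total * v_total);
+ *
+ * e.g. a 148500 kHz, 2200 x 1125 timing is 60 Hz at v_total = vmin = 1125
+ * and drops to ~48 Hz when vmax stretches v_total to ~1406 lines.
+ */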
+
+
+static bool set_gamut_remap(struct dc *dc,
+			const struct dc_stream **stream, int num_streams)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
+	int i = 0;
+	bool ret = false;
+	struct pipe_ctx *pipes;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
+				== core_stream) {
+
+			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
+			core_dc->hwss.set_plane_config(core_dc, pipes,
+					&core_dc->current_context->res_ctx);
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+/* This function is not expected to fail, proper implementation of
+ * validation will prevent this from ever being called for unsupported
+ * configurations.
+ */
+static void stream_update_scaling(
+		const struct dc *dc,
+		const struct dc_stream *dc_stream,
+		const struct rect *src,
+		const struct rect *dst)
+{
+	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct validate_context *cur_ctx = core_dc->current_context;
+	int i, j;
+
+	if (src)
+		stream->public.src = *src;
+
+	if (dst)
+		stream->public.dst = *dst;
+
+	for (i = 0; i < cur_ctx->target_count; i++) {
+		struct core_target *target = cur_ctx->targets[i];
+		struct dc_target_status *status = &cur_ctx->target_status[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			if (target->public.streams[j] != dc_stream)
+				continue;
+
+			if (status->surface_count)
+				if (!dc_commit_surfaces_to_target(
+						&core_dc->public,
+						status->surfaces,
+						status->surface_count,
+						&target->public))
+					/* Need to debug validation */
+					BREAK_TO_DEBUGGER();
+
+			return;
+		}
+	}
+}
+
+static bool set_backlight(struct dc *dc, unsigned int backlight_level,
+			unsigned int frame_ramp, const struct dc_stream *stream)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
+		for (i = 0; i < core_dc->link_count; i++)
+			dc_link_set_backlight_level(&core_dc->links[i]->public,
+					backlight_level, frame_ramp, stream);
+	}
+
+	return true;
+}
+
+static bool init_dmcu_backlight_settings(struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	for (i = 0; i < core_dc->link_count; i++)
+		dc_link_init_dmcu_backlight_settings
+			(&core_dc->links[i]->public);
+
+	return true;
+}
+
+
+static bool set_abm_level(struct dc *dc, unsigned int abm_level)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	for (i = 0; i < core_dc->link_count; i++)
+		dc_link_set_abm_level(&core_dc->links[i]->public,
+				abm_level);
+
+	return true;
+}
+
+static bool set_psr_enable(struct dc *dc, bool enable)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	for (i = 0; i < core_dc->link_count; i++)
+		dc_link_set_psr_enable(&core_dc->links[i]->public,
+				enable);
+
+	return true;
+}
+
+
+static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
+	struct pipe_ctx *pipes;
+	int i;
+	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+
+	for (i = 0; i < core_dc->link_count; i++) {
+		if (core_stream->sink->link == core_dc->links[i])
+			dc_link_setup_psr(&core_dc->links[i]->public,
+					stream);
+	}
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
+				== core_stream && i != underlay_idx) {
+			pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
+			core_dc->hwss.set_static_screen_control(&pipes, 1,
+					0x182);
+		}
+	}
+
+	return true;
+}
+
+static void set_drive_settings(struct dc *dc,
+		struct link_training_settings *lt_settings)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	for (i = 0; i < core_dc->link_count; i++)
+		dc_link_dp_set_drive_settings(&core_dc->links[i]->public,
+				lt_settings);
+}
+
+static void perform_link_training(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		bool skip_video_pattern)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	for (i = 0; i < core_dc->link_count; i++)
+		dc_link_dp_perform_link_training(
+			&core_dc->links[i]->public,
+			link_setting,
+			skip_video_pattern);
+}
+
+static void set_preferred_link_settings(struct dc *dc,
+		struct dc_link_settings *link_setting)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	for (i = 0; i < core_dc->link_count; i++) {
+		core_dc->links[i]->public.verified_link_cap.lane_count =
+				link_setting->lane_count;
+		core_dc->links[i]->public.verified_link_cap.link_rate =
+				link_setting->link_rate;
+	}
+}
+
+static void enable_hpd(const struct dc_link *link)
+{
+	dc_link_dp_enable_hpd(link);
+}
+
+static void disable_hpd(const struct dc_link *link)
+{
+	dc_link_dp_disable_hpd(link);
+}
+
+
+static void set_test_pattern(
+		const struct dc_link *link,
+		enum dp_test_pattern test_pattern,
+		const struct link_training_settings *p_link_settings,
+		const unsigned char *p_custom_pattern,
+		unsigned int cust_pattern_size)
+{
+	if (link != NULL)
+		dc_link_dp_set_test_pattern(
+			link,
+			test_pattern,
+			p_link_settings,
+			p_custom_pattern,
+			cust_pattern_size);
+}
+
+static void allocate_dc_stream_funcs(struct core_dc *core_dc)
+{
+	core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
+	if (core_dc->hwss.set_drr != NULL) {
+		core_dc->public.stream_funcs.adjust_vmin_vmax =
+				stream_adjust_vmin_vmax;
+	}
+
+	core_dc->public.stream_funcs.set_gamut_remap =
+			set_gamut_remap;
+
+	core_dc->public.stream_funcs.set_backlight =
+			set_backlight;
+
+	core_dc->public.stream_funcs.init_dmcu_backlight_settings =
+			init_dmcu_backlight_settings;
+
+	core_dc->public.stream_funcs.set_abm_level =
+			set_abm_level;
+
+	core_dc->public.stream_funcs.set_psr_enable =
+			set_psr_enable;
+
+	core_dc->public.stream_funcs.setup_psr =
+			setup_psr;
+
+	core_dc->public.link_funcs.set_drive_settings =
+			set_drive_settings;
+
+	core_dc->public.link_funcs.perform_link_training =
+			perform_link_training;
+
+	core_dc->public.link_funcs.set_preferred_link_settings =
+			set_preferred_link_settings;
+
+	core_dc->public.link_funcs.enable_hpd =
+			enable_hpd;
+
+	core_dc->public.link_funcs.disable_hpd =
+			disable_hpd;
+
+	core_dc->public.link_funcs.set_test_pattern =
+			set_test_pattern;
+}
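+
+/* DM-side callers are expected to NULL-check the optional hooks before
+ * use, since adjust_vmin_vmax is only wired up when the HW sequencer
+ * implements set_drr.  Hedged usage sketch:
+ *
+ *	if (dc->stream_funcs.adjust_vmin_vmax)
+ *		dc->stream_funcs.adjust_vmin_vmax(dc, &stream, 1, vmin, vmax);
+ */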
+
+static void destruct(struct core_dc *dc)
+{
+	resource_validate_ctx_destruct(dc->current_context);
+
+	dm_free(dc->temp_flip_context);
+	dc->temp_flip_context = NULL;
+
+	destroy_links(dc);
+
+	dc_destroy_resource_pool(dc);
+
+	if (dc->ctx->gpio_service)
+		dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+	if (dc->ctx->i2caux)
+		dal_i2caux_destroy(&dc->ctx->i2caux);
+
+	if (dc->ctx->created_bios)
+		dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+	if (dc->ctx->logger)
+		dal_logger_destroy(&dc->ctx->logger);
+
+	dm_free(dc->current_context);
+	dc->current_context = NULL;
+
+	dm_free(dc->ctx);
+	dc->ctx = NULL;
+}
+
+static bool construct(struct core_dc *dc,
+		const struct dc_init_data *init_params)
+{
+	struct dal_logger *logger;
+	struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
+	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+	if (!dc_ctx) {
+		dm_error("%s: failed to create ctx\n", __func__);
+		goto ctx_fail;
+	}
+
+	dc->current_context = dm_alloc(sizeof(*dc->current_context));
+	dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
+
+	if (!dc->current_context || !dc->temp_flip_context) {
+		dm_error("%s: failed to create validate ctx\n", __func__);
+		goto val_ctx_fail;
+	}
+
+	dc_ctx->cgs_device = init_params->cgs_device;
+	dc_ctx->driver_context = init_params->driver;
+	dc_ctx->dc = &dc->public;
+	dc_ctx->asic_id = init_params->asic_id;
+
+	/* Create logger */
+	logger = dal_logger_create(dc_ctx);
+
+	if (!logger) {
+		/* Cannot use the logger here; report through the base
+		 * driver's error print.
+		 */
+		dm_error("%s: failed to create Logger!\n", __func__);
+		goto logger_fail;
+	}
+	dc_ctx->logger = logger;
+	dc->ctx = dc_ctx;
+	dc->ctx->dce_environment = init_params->dce_environment;
+
+	dc_version = resource_parse_asic_id(init_params->asic_id);
+	dc->ctx->dce_version = dc_version;
+
+	/* Resource should construct all asic specific resources.
+	 * This should be the only place where we need to parse the asic id
+	 */
+	if (init_params->vbios_override)
+		dc_ctx->dc_bios = init_params->vbios_override;
+	else {
+		/* Create BIOS parser */
+		struct bp_init_data bp_init_data;
+
+		bp_init_data.ctx = dc_ctx;
+		bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+		dc_ctx->dc_bios = dal_bios_parser_create(
+				&bp_init_data, dc_version);
+
+		if (!dc_ctx->dc_bios) {
+			ASSERT_CRITICAL(false);
+			goto bios_fail;
+		}
+
+		dc_ctx->created_bios = true;
+	}
+
+	/* Create I2C AUX */
+	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
+
+	if (!dc_ctx->i2caux) {
+		ASSERT_CRITICAL(false);
+		goto failed_to_create_i2caux;
+	}
+
+	/* Create GPIO service */
+	dc_ctx->gpio_service = dal_gpio_service_create(
+			dc_version,
+			dc_ctx->dce_environment,
+			dc_ctx);
+
+	if (!dc_ctx->gpio_service) {
+		ASSERT_CRITICAL(false);
+		goto gpio_fail;
+	}
+
+	dc->res_pool = dc_create_resource_pool(
+			dc,
+			init_params->num_virtual_links,
+			dc_version,
+			init_params->asic_id);
+	if (!dc->res_pool)
+		goto create_resource_fail;
+
+	if (!create_links(dc, init_params->num_virtual_links))
+		goto create_links_fail;
+
+	allocate_dc_stream_funcs(dc);
+
+	return true;
+
+	/**** error handling here ****/
+create_links_fail:
+create_resource_fail:
+gpio_fail:
+failed_to_create_i2caux:
+bios_fail:
+logger_fail:
+val_ctx_fail:
+ctx_fail:
+	destruct(dc);
+	return false;
+}
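+
+/* All of the failure labels above intentionally collapse into a single
+ * destruct() call; that is why destruct() NULL-checks every member
+ * (gpio_service, i2caux, created_bios, logger) -- it has to be safe to
+ * run against a partially constructed core_dc no matter how early
+ * construct() bailed out.
+ */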
+
+/*
+void ProgramPixelDurationV(unsigned int pixelClockInKHz )
+{
+	fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
+	unsigned int pixDurationInPico = round(pixel_duration);
+
+	DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
+
+	arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
+	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
+	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
+
+	arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
+	arb_control.bits.PIXEL_DURATION = pixDurationInPico;
+	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
+
+	WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
+	WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
+
+	WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
+	WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
+}
+*/
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+struct dc *dc_create(const struct dc_init_data *init_params)
+{
+	struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
+	unsigned int full_pipe_count;
+
+	if (!core_dc)
+		goto alloc_fail;
+
+	if (!construct(core_dc, init_params))
+		goto construct_fail;
+
+	/* TODO: separate HW and SW initialization */
+	core_dc->hwss.init_hw(core_dc);
+
+	full_pipe_count = core_dc->res_pool->pipe_count;
+	if (core_dc->res_pool->underlay_pipe_index >= 0)
+		full_pipe_count--;
+	core_dc->public.caps.max_targets = dm_min(
+			full_pipe_count,
+			core_dc->res_pool->stream_enc_count);
+
+	core_dc->public.caps.max_links = core_dc->link_count;
+	core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
+
+	core_dc->public.config = init_params->flags;
+
+	dm_logger_write(core_dc->ctx->logger, LOG_DC,
+			"Display Core initialized\n");
+
+	/* TODO: missing feature to be enabled */
+	core_dc->public.debug.disable_dfs_bypass = true;
+
+	return &core_dc->public;
+
+construct_fail:
+	dm_free(core_dc);
+
+alloc_fail:
+	return NULL;
+}
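+
+/* Hedged bring-up sketch from a display manager; the field names are the
+ * ones consumed by construct() above, everything else is illustrative:
+ *
+ *	struct dc_init_data init = { 0 };
+ *	struct dc *dc;
+ *
+ *	init.driver = adev;            (opaque DM handle)
+ *	init.cgs_device = cgs_device;
+ *	init.asic_id = asic_id;
+ *	init.num_virtual_links = 0;
+ *
+ *	dc = dc_create(&init);
+ *	if (!dc)
+ *		return -ENOMEM;
+ *	...
+ *	dc_destroy(&dc);               (also NULLs the caller's pointer)
+ */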
+
+void dc_destroy(struct dc **dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(*dc);
+	destruct(core_dc);
+	dm_free(core_dc);
+	*dc = NULL;
+}
+
+static bool is_validation_required(
+		const struct core_dc *dc,
+		const struct dc_validation_set set[],
+		int set_count)
+{
+	const struct validate_context *context = dc->current_context;
+	int i, j;
+
+	if (context->target_count != set_count)
+		return true;
+
+	for (i = 0; i < set_count; i++) {
+
+		if (set[i].surface_count != context->target_status[i].surface_count)
+			return true;
+		if (!is_target_unchanged(DC_TARGET_TO_CORE(set[i].target), context->targets[i]))
+			return true;
+
+		for (j = 0; j < set[i].surface_count; j++) {
+			struct dc_surface temp_surf = { 0 };
+
+			temp_surf = *context->target_status[i].surfaces[j];
+			temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
+			temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
+			temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
+
+			if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
+				return true;
+		}
+	}
+
+	return false;
+}
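+
+/* The temp_surf copy above implements a field-masked memcmp: the current
+ * surface is duplicated, the members that may legitimately change without
+ * revalidation (clip_rect and the dst_rect origin) are overwritten with
+ * the incoming values, and the whole structs are then compared.  Any
+ * remaining byte difference means something else changed, so a full
+ * validation pass is required.  Struct padding can only cause a spurious
+ * "changed" verdict here, which errs on the safe side of revalidating.
+ */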
+
+bool dc_validate_resources(
+		const struct dc *dc,
+		const struct dc_validation_set set[],
+		uint8_t set_count)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	struct validate_context *context;
+
+	if (!is_validation_required(core_dc, set, set_count))
+		return true;
+
+	context = dm_alloc(sizeof(struct validate_context));
+	if (context == NULL)
+		goto context_alloc_fail;
+
+	result = core_dc->res_pool->funcs->validate_with_context(
+						core_dc, set, set_count, context);
+
+	resource_validate_ctx_destruct(context);
+	dm_free(context);
+
+context_alloc_fail:
+	if (result != DC_OK) {
+		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
+				"%s:resource validation failed, dc_status:%d\n",
+				__func__,
+				result);
+	}
+
+	return (result == DC_OK);
+}
+
+bool dc_validate_guaranteed(
+		const struct dc *dc,
+		const struct dc_target *dc_target)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	struct validate_context *context;
+
+	context = dm_alloc(sizeof(struct validate_context));
+	if (context == NULL)
+		goto context_alloc_fail;
+
+	result = core_dc->res_pool->funcs->validate_guaranteed(
+					core_dc, dc_target, context);
+
+	resource_validate_ctx_destruct(context);
+	dm_free(context);
+
+context_alloc_fail:
+	if (result != DC_OK) {
+		dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
+			"%s:guaranteed validation failed, dc_status:%d\n",
+			__func__,
+			result);
+	}
+
+	return (result == DC_OK);
+}
+
+static void program_timing_sync(
+		struct core_dc *core_dc,
+		struct validate_context *ctx)
+{
+	int i, j;
+	int group_index = 0;
+	int pipe_count = ctx->res_ctx.pool->pipe_count;
+	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+	for (i = 0; i < pipe_count; i++) {
+		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
+			continue;
+
+		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+	}
+
+	for (i = 0; i < pipe_count; i++) {
+		int group_size = 1;
+		struct pipe_ctx *pipe_set[MAX_PIPES];
+
+		if (!unsynced_pipes[i])
+			continue;
+
+		pipe_set[0] = unsynced_pipes[i];
+		unsynced_pipes[i] = NULL;
+
+		/* Add this pipe's TG to the set, then search the remaining
+		 * TGs for ones with the same timing and add them to the
+		 * group.
+		 */
+		for (j = i + 1; j < pipe_count; j++) {
+			if (!unsynced_pipes[j])
+				continue;
+
+			if (resource_are_streams_timing_synchronizable(
+					unsynced_pipes[j]->stream,
+					pipe_set[0]->stream)) {
+				pipe_set[group_size] = unsynced_pipes[j];
+				unsynced_pipes[j] = NULL;
+				group_size++;
+			}
+		}
+
+		/* set first unblanked pipe as master */
+		for (j = 0; j < group_size; j++) {
+			struct pipe_ctx *temp;
+
+			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
+				if (j == 0)
+					break;
+
+				temp = pipe_set[0];
+				pipe_set[0] = pipe_set[j];
+				pipe_set[j] = temp;
+				break;
+			}
+		}
+
+		/* remove any other unblanked pipes as they have already been synced */
+		for (j = j + 1; j < group_size; j++) {
+			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
+				group_size--;
+				pipe_set[j] = pipe_set[group_size];
+				j--;
+			}
+		}
+
+		if (group_size > 1) {
+			core_dc->hwss.enable_timing_synchronization(
+				core_dc, group_index, group_size, pipe_set);
+			group_index++;
+		}
+	}
+}
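+
+/* program_timing_sync() is a greedy partition: each outer pass pulls the
+ * first remaining entry out of unsynced_pipes[], gathers every other pipe
+ * whose stream timing is synchronizable with it, promotes the first
+ * already-unblanked pipe to slot 0 as the sync master, drops any other
+ * unblanked pipes (those are already running in sync), and hands groups
+ * of two or more pipes to the HW sequencer.
+ */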
+
+static bool targets_changed(
+		struct core_dc *dc,
+		struct dc_target *targets[],
+		uint8_t target_count)
+{
+	uint8_t i;
+
+	if (target_count != dc->current_context->target_count)
+		return true;
+
+	for (i = 0; i < dc->current_context->target_count; i++) {
+		if (&dc->current_context->targets[i]->public != targets[i])
+			return true;
+	}
+
+	return false;
+}
+
+static void fill_display_configs(
+	const struct validate_context *context,
+	struct dm_pp_display_configuration *pp_display_cfg)
+{
+	uint8_t i, j, k;
+	uint8_t num_cfgs = 0;
+
+	for (i = 0; i < context->target_count; i++) {
+		const struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			const struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+			struct dm_pp_single_disp_config *cfg =
+					&pp_display_cfg->disp_configs[num_cfgs];
+			const struct pipe_ctx *pipe_ctx = NULL;
+
+			for (k = 0; k < MAX_PIPES; k++)
+				if (stream ==
+					context->res_ctx.pipe_ctx[k].stream) {
+					pipe_ctx = &context->res_ctx.pipe_ctx[k];
+					break;
+				}
+
+			ASSERT(pipe_ctx != NULL);
+
+			num_cfgs++;
+			cfg->signal = pipe_ctx->stream->signal;
+			cfg->pipe_idx = pipe_ctx->pipe_idx;
+			cfg->src_height = stream->public.src.height;
+			cfg->src_width = stream->public.src.width;
+			cfg->ddi_channel_mapping =
+				stream->sink->link->ddi_channel_mapping.raw;
+			cfg->transmitter =
+				stream->sink->link->link_enc->transmitter;
+			cfg->link_settings.lane_count = stream->sink->link->public.cur_link_settings.lane_count;
+			cfg->link_settings.link_rate = stream->sink->link->public.cur_link_settings.link_rate;
+			cfg->link_settings.link_spread = stream->sink->link->public.cur_link_settings.link_spread;
+			cfg->sym_clock = stream->phy_pix_clk;
+			/* Round v_refresh */
+			cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000;
+			cfg->v_refresh /= stream->public.timing.h_total;
+			cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2)
+						/ stream->public.timing.v_total;
+		}
+	}
+	pp_display_cfg->display_count = num_cfgs;
+}
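+
+/* The v_refresh computation above is integer rounding to the nearest Hz:
+ *
+ *	v_refresh = (pix_clk_khz * 1000 / h_total + v_total / 2) / v_total;
+ *
+ * e.g. for a 148500 kHz, 2200 x 1125 timing: 148500000 / 2200 = 67500,
+ * and (67500 + 562) / 1125 = 60 Hz.
+ */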
+
+static uint32_t get_min_vblank_time_us(const struct validate_context *context)
+{
+	uint8_t i, j;
+	uint32_t min_vertical_blank_time = -1;
+
+	for (i = 0; i < context->target_count; i++) {
+		const struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			const struct dc_stream *stream =
+						target->public.streams[j];
+			uint32_t vertical_blank_in_pixels = 0;
+			uint32_t vertical_blank_time = 0;
+
+			vertical_blank_in_pixels = stream->timing.h_total *
+				(stream->timing.v_total
+					- stream->timing.v_addressable);
+			vertical_blank_time = vertical_blank_in_pixels
+				* 1000 / stream->timing.pix_clk_khz;
+			if (min_vertical_blank_time > vertical_blank_time)
+				min_vertical_blank_time = vertical_blank_time;
+		}
+	}
+	return min_vertical_blank_time;
+}
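+
+/* Worked example for the formula above (illustrative timing): with
+ * h_total = 2200, v_total = 1125, v_addressable = 1080 at 148500 kHz,
+ * vertical_blank_in_pixels = 2200 * 45 = 99000 and
+ * vertical_blank_time = 99000 * 1000 / 148500 = 666 us.  The minimum over
+ * all streams bounds how long a memory clock switch may take while every
+ * display is in vertical blank.
+ */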
+
+static int determine_sclk_from_bounding_box(
+		const struct core_dc *dc,
+		int required_sclk)
+{
+	int i;
+
+	/*
+	 * Some asics do not give us sclk levels, so we just report the actual
+	 * required sclk
+	 */
+	if (dc->sclk_lvls.num_levels == 0)
+		return required_sclk;
+
+	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+			return dc->sclk_lvls.clocks_in_khz[i];
+	}
+	/*
+	 * Even the maximum level could not satisfy the requirement; this is
+	 * unexpected at this stage and should have been caught at
+	 * validation time.
+	 */
+	ASSERT(0);
+	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
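+
+/* Example (illustrative levels): with clocks_in_khz = { 300000, 600000 },
+ * a required_sclk of 450000 snaps up to 600000.  A requirement above the
+ * top level trips the ASSERT and clamps to that top level.
+ */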
+
+void pplib_apply_display_requirements(
+	struct core_dc *dc,
+	const struct validate_context *context,
+	struct dm_pp_display_configuration *pp_display_cfg)
+{
+	pp_display_cfg->all_displays_in_sync =
+		context->bw_results.all_displays_in_sync;
+	pp_display_cfg->nb_pstate_switch_disable =
+			context->bw_results.nbp_state_change_enable == false;
+	pp_display_cfg->cpu_cc6_disable =
+			context->bw_results.cpuc_state_change_enable == false;
+	pp_display_cfg->cpu_pstate_disable =
+			context->bw_results.cpup_state_change_enable == false;
+	pp_display_cfg->cpu_pstate_separation_time =
+			context->bw_results.blackout_recovery_time_us;
+
+	pp_display_cfg->min_memory_clock_khz = context->bw_results.required_yclk
+		/ MEMORY_TYPE_MULTIPLIER;
+
+	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+			dc,
+			context->bw_results.required_sclk);
+
+	pp_display_cfg->min_engine_clock_deep_sleep_khz
+			= context->bw_results.required_sclk_deep_sleep;
+
+	pp_display_cfg->avail_mclk_switch_time_us =
+						get_min_vblank_time_us(context);
+	/* TODO: dce11.2 */
+	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+	pp_display_cfg->disp_clk_khz = context->bw_results.dispclk_khz;
+
+	fill_display_configs(context, pp_display_cfg);
+
+	/* TODO: is this still applicable? */
+	if (pp_display_cfg->display_count == 1) {
+		const struct dc_crtc_timing *timing =
+			&context->targets[0]->public.streams[0]->timing;
+
+		pp_display_cfg->crtc_index =
+			pp_display_cfg->disp_configs[0].pipe_idx;
+		pp_display_cfg->line_time_in_us = timing->h_total * 1000
+							/ timing->pix_clk_khz;
+	}
+
+	if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+			struct dm_pp_display_configuration)) != 0)
+		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+	dc->prev_display_config = *pp_display_cfg;
+}
+
+bool dc_commit_targets(
+	struct dc *dc,
+	struct dc_target *targets[],
+	uint8_t target_count)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct dc_bios *dcb = core_dc->ctx->dc_bios;
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	struct validate_context *context;
+	struct dc_validation_set set[MAX_TARGETS];
+	int i, j, k;
+
+	if (!targets_changed(core_dc, targets, target_count))
+		return true;
+
+	dm_logger_write(core_dc->ctx->logger, LOG_DC,
+				"%s: %d targets\n",
+				__func__,
+				target_count);
+
+	for (i = 0; i < target_count; i++) {
+		struct dc_target *target = targets[i];
+
+		dc_target_log(target,
+				core_dc->ctx->logger,
+				LOG_DC);
+
+		set[i].target = targets[i];
+		set[i].surface_count = 0;
+	}
+
+	context = dm_alloc(sizeof(struct validate_context));
+	if (context == NULL)
+		goto context_alloc_fail;
+
+	result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, target_count, context);
+	if (result != DC_OK) {
+		dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
+					"%s: Context validation failed! dc_status:%d\n",
+					__func__,
+					result);
+		BREAK_TO_DEBUGGER();
+		resource_validate_ctx_destruct(context);
+		goto fail;
+	}
+
+	if (!dcb->funcs->is_accelerated_mode(dcb))
+		core_dc->hwss.enable_accelerated_mode(core_dc);
+
+	if (result == DC_OK)
+		result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
+
+	program_timing_sync(core_dc, context);
+
+	for (i = 0; i < context->target_count; i++) {
+		struct dc_target *dc_target = &context->targets[i]->public;
+		struct core_sink *sink = DC_SINK_TO_CORE(dc_target->streams[0]->sink);
+
+		for (j = 0; j < context->target_status[i].surface_count; j++) {
+			const struct dc_surface *dc_surface =
+					context->target_status[i].surfaces[j];
+
+			for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
+				struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
+
+				if (dc_surface != &pipe->surface->public
+						|| !dc_surface->visible)
+					continue;
+
+				pipe->tg->funcs->set_blank(pipe->tg, false);
+			}
+		}
+
+		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
+				dc_target->streams[0]->timing.h_addressable,
+				dc_target->streams[0]->timing.v_addressable,
+				dc_target->streams[0]->timing.h_total,
+				dc_target->streams[0]->timing.v_total,
+				dc_target->streams[0]->timing.pix_clk_khz);
+	}
+
+	pplib_apply_display_requirements(core_dc,
+			context, &context->pp_display_cfg);
+
+	resource_validate_ctx_destruct(core_dc->current_context);
+
+	dm_free(core_dc->current_context);
+	core_dc->current_context = context;
+
+	return (result == DC_OK);
+
+fail:
+	dm_free(context);
+
+context_alloc_fail:
+	return (result == DC_OK);
+}
+
+bool dc_pre_update_surfaces_to_target(
+		struct dc *dc,
+		const struct dc_surface *const *new_surfaces,
+		uint8_t new_surface_count,
+		struct dc_target *dc_target)
+{
+	int i, j;
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	uint32_t prev_disp_clk = core_dc->current_context->bw_results.dispclk_khz;
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+	struct dc_target_status *target_status = NULL;
+	struct validate_context *context;
+	struct validate_context *temp_context;
+	bool ret = true;
+
+	pre_surface_trace(dc, new_surfaces, new_surface_count);
+
+	if (core_dc->current_context->target_count == 0)
+		return false;
+
+	/* Cannot commit surface to a target that is not committed */
+	for (i = 0; i < core_dc->current_context->target_count; i++)
+		if (target == core_dc->current_context->targets[i])
+			break;
+
+	if (i == core_dc->current_context->target_count)
+		return false;
+
+	target_status = &core_dc->current_context->target_status[i];
+
+	if (new_surface_count == target_status->surface_count) {
+		bool skip_pre = true;
+
+		for (i = 0; i < target_status->surface_count; i++) {
+			struct dc_surface temp_surf = { 0 };
+
+			temp_surf = *target_status->surfaces[i];
+			temp_surf.clip_rect = new_surfaces[i]->clip_rect;
+			temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
+			temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
+
+			if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
+				skip_pre = false;
+				break;
+			}
+		}
+
+		if (skip_pre)
+			return true;
+	}
+
+	context = dm_alloc(sizeof(struct validate_context));
+
+	if (!context) {
+		dm_error("%s: failed to create validate ctx\n", __func__);
+		ret = false;
+		goto val_ctx_fail;
+	}
+
+	resource_validate_ctx_copy_construct(core_dc->current_context, context);
+
+	dm_logger_write(core_dc->ctx->logger, LOG_DC,
+				"%s: commit %d surfaces to target 0x%x\n",
+				__func__,
+				new_surface_count,
+				dc_target);
+
+	if (!resource_attach_surfaces_to_context(
+			new_surfaces, new_surface_count, dc_target, context)) {
+		BREAK_TO_DEBUGGER();
+		ret = false;
+		goto unexpected_fail;
+	}
+
+	for (i = 0; i < new_surface_count; i++)
+		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+			if (context->res_ctx.pipe_ctx[j].surface !=
+					DC_SURFACE_TO_CORE(new_surfaces[i]))
+				continue;
+
+			resource_build_scaling_params(
+				new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
+
+			if (dc->debug.surface_visual_confirm) {
+				context->res_ctx.pipe_ctx[j].scl_data.recout.height -= 2;
+				context->res_ctx.pipe_ctx[j].scl_data.recout.width -= 2;
+			}
+		}
+
+	if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, context) != DC_OK) {
+		BREAK_TO_DEBUGGER();
+		ret = false;
+		goto unexpected_fail;
+	}
+
+	if (core_dc->res_pool->funcs->apply_clk_constraints) {
+		temp_context = core_dc->res_pool->funcs->apply_clk_constraints(
+				core_dc,
+				context);
+		if (!temp_context) {
+			dm_error("%s:failed apply clk constraints\n", __func__);
+			ret = false;
+			goto unexpected_fail;
+		}
+		resource_validate_ctx_destruct(context);
+		dm_free(context);
+		context = temp_context;
+	}
+
+	if (prev_disp_clk < context->bw_results.dispclk_khz) {
+		pplib_apply_display_requirements(core_dc, context,
+						&context->pp_display_cfg);
+		core_dc->hwss.set_display_clock(context);
+		core_dc->current_context->bw_results.dispclk_khz =
+				context->bw_results.dispclk_khz;
+	}
+
+	for (i = 0; i < new_surface_count; i++)
+		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+			if (context->res_ctx.pipe_ctx[j].surface !=
+					DC_SURFACE_TO_CORE(new_surfaces[i]))
+				continue;
+
+			core_dc->hwss.prepare_pipe_for_context(
+					core_dc,
+					&context->res_ctx.pipe_ctx[j],
+					context);
+
+			if (!new_surfaces[i]->visible)
+				context->res_ctx.pipe_ctx[j].tg->funcs->set_blank(
+						context->res_ctx.pipe_ctx[j].tg, true);
+		}
+
+unexpected_fail:
+	resource_validate_ctx_destruct(context);
+	dm_free(context);
+val_ctx_fail:
+
+	return ret;
+}
+
+bool dc_post_update_surfaces_to_target(struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i;
+
+	post_surface_trace(dc);
+
+	for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count; i++)
+		if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == NULL)
+			core_dc->hwss.power_down_front_end(
+				core_dc, &core_dc->current_context->res_ctx.pipe_ctx[i]);
+
+	if (core_dc->res_pool->funcs->validate_bandwidth(core_dc, core_dc->current_context)
+			!= DC_OK) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	core_dc->hwss.set_bandwidth(core_dc);
+
+	pplib_apply_display_requirements(
+			core_dc, core_dc->current_context, &core_dc->current_context->pp_display_cfg);
+
+	return true;
+}
+
+bool dc_commit_surfaces_to_target(
+		struct dc *dc,
+		const struct dc_surface **new_surfaces,
+		uint8_t new_surface_count,
+		struct dc_target *dc_target)
+{
+	struct dc_surface_update updates[MAX_SURFACES] = { 0 };
+	struct dc_flip_addrs flip_addr[MAX_SURFACES] = { 0 };
+	struct dc_plane_info plane_info[MAX_SURFACES] = { 0 };
+	struct dc_scaling_info scaling_info[MAX_SURFACES] = { 0 };
+	int i;
+
+	if (!dc_pre_update_surfaces_to_target(
+			dc, new_surfaces, new_surface_count, dc_target))
+		return false;
+
+	for (i = 0; i < new_surface_count; i++) {
+		updates[i].surface = new_surfaces[i];
+		updates[i].gamma = (struct dc_gamma *)new_surfaces[i]->gamma_correction;
+
+		flip_addr[i].address = new_surfaces[i]->address;
+		flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
+		plane_info[i].color_space = new_surfaces[i]->color_space;
+		plane_info[i].format = new_surfaces[i]->format;
+		plane_info[i].plane_size = new_surfaces[i]->plane_size;
+		plane_info[i].rotation = new_surfaces[i]->rotation;
+		plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
+		plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
+		plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
+		plane_info[i].visible = new_surfaces[i]->visible;
+		scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
+		scaling_info[i].src_rect = new_surfaces[i]->src_rect;
+		scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
+		scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
+
+		updates[i].flip_addr = &flip_addr[i];
+		updates[i].plane_info = &plane_info[i];
+		updates[i].scaling_info = &scaling_info[i];
+	}
+	dc_update_surfaces_for_target(dc, updates, new_surface_count, dc_target);
+
+	return dc_post_update_surfaces_to_target(dc);
+}
+
+void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates,
+		int surface_count, struct dc_target *dc_target)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct validate_context *context = core_dc->temp_flip_context;
+	int i, j;
+	bool is_new_pipe_surface[MAX_SURFACES];
+	const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
+
+	update_surface_trace(dc, updates, surface_count);
+
+	*context = *core_dc->current_context;
+
+	for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+		struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (cur_pipe->top_pipe)
+			cur_pipe->top_pipe =
+				&context->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+		if (cur_pipe->bottom_pipe)
+			cur_pipe->bottom_pipe =
+				&context->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+	}
+
+	for (j = 0; j < MAX_SURFACES; j++)
+		is_new_pipe_surface[j] = true;
+
+	for (i = 0; i < surface_count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
+
+		new_surfaces[i] = updates[i].surface;
+		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+			/* track new vs. existing attachment per pipe index */
+			if (surface == pipe_ctx->surface)
+				is_new_pipe_surface[j] = false;
+		}
+	}
+
+	if (dc_target) {
+		struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+
+		if (core_dc->current_context->target_count == 0)
+			return;
+
+		/* Cannot commit surface to a target that is not committed */
+		for (i = 0; i < core_dc->current_context->target_count; i++)
+			if (target == core_dc->current_context->targets[i])
+				break;
+		if (i == core_dc->current_context->target_count)
+			return;
+
+		if (!resource_attach_surfaces_to_context(
+				new_surfaces, surface_count, dc_target, context)) {
+			BREAK_TO_DEBUGGER();
+			return;
+		}
+	}
+
+	for (i = 0; i < surface_count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
+
+		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+			if (pipe_ctx->surface != surface)
+				continue;
+
+			if (updates[i].flip_addr) {
+				surface->public.address = updates[i].flip_addr->address;
+				surface->public.flip_immediate =
+						updates[i].flip_addr->flip_immediate;
+			}
+
+			if (updates[i].plane_info || updates[i].scaling_info
+					|| is_new_pipe_surface[j]) {
+
+				if (updates[i].plane_info) {
+					surface->public.color_space =
+						updates[i].plane_info->color_space;
+					surface->public.format =
+						updates[i].plane_info->format;
+					surface->public.plane_size =
+						updates[i].plane_info->plane_size;
+					surface->public.rotation =
+						updates[i].plane_info->rotation;
+					surface->public.horizontal_mirror =
+						updates[i].plane_info->horizontal_mirror;
+					surface->public.stereo_format =
+						updates[i].plane_info->stereo_format;
+					surface->public.tiling_info =
+						updates[i].plane_info->tiling_info;
+					surface->public.visible =
+						updates[i].plane_info->visible;
+				}
+
+				if (updates[i].scaling_info) {
+					surface->public.scaling_quality =
+						updates[i].scaling_info->scaling_quality;
+					surface->public.dst_rect =
+						updates[i].scaling_info->dst_rect;
+					surface->public.src_rect =
+						updates[i].scaling_info->src_rect;
+					surface->public.clip_rect =
+						updates[i].scaling_info->clip_rect;
+				}
+
+				resource_build_scaling_params(updates[i].surface, pipe_ctx);
+				if (dc->debug.surface_visual_confirm) {
+					pipe_ctx->scl_data.recout.height -= 2;
+					pipe_ctx->scl_data.recout.width -= 2;
+				}
+			}
+		}
+	}
+
+	for (i = 0; i < surface_count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
+		bool apply_ctx = false;
+
+		for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+			if (pipe_ctx->surface != surface)
+				continue;
+
+			if (updates[i].flip_addr) {
+				core_dc->hwss.pipe_control_lock(
+							core_dc->hwseq,
+							pipe_ctx->pipe_idx,
+							PIPE_LOCK_CONTROL_SURFACE,
+							true);
+				core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
+			}
+
+			if (updates[i].plane_info || updates[i].scaling_info
+					|| is_new_pipe_surface[j]) {
+
+				apply_ctx = true;
+
+				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
+					core_dc->hwss.pipe_control_lock(
+							core_dc->hwseq,
+							pipe_ctx->pipe_idx,
+							PIPE_LOCK_CONTROL_SURFACE |
+							PIPE_LOCK_CONTROL_GRAPHICS |
+							PIPE_LOCK_CONTROL_SCL |
+							PIPE_LOCK_CONTROL_BLENDER |
+							PIPE_LOCK_CONTROL_MODE,
+							true);
+				}
+			}
+
+			if (updates[i].gamma)
+				core_dc->hwss.prepare_pipe_for_context(
+						core_dc, pipe_ctx, context);
+		}
+		if (apply_ctx)
+			core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
+	}
+
+	for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		for (j = 0; j < surface_count; j++) {
+			if (updates[j].surface == &pipe_ctx->surface->public) {
+				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
+					core_dc->hwss.pipe_control_lock(
+							core_dc->hwseq,
+							pipe_ctx->pipe_idx,
+							PIPE_LOCK_CONTROL_GRAPHICS |
+							PIPE_LOCK_CONTROL_SCL |
+							PIPE_LOCK_CONTROL_BLENDER |
+							PIPE_LOCK_CONTROL_SURFACE,
+							false);
+				}
+				break;
+			}
+		}
+	}
+
+	core_dc->temp_flip_context = core_dc->current_context;
+	core_dc->current_context = context;
+}
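+
+/* dc_update_surfaces_for_target() double-buffers the validate_context:
+ * the current context is copied by value into temp_flip_context, the
+ * top_pipe/bottom_pipe pointers are re-aimed at the copy (a plain struct
+ * assignment leaves them pointing into the old pipe_ctx array), surfaces
+ * and pipe locks are programmed against the copy, and the two contexts
+ * are swapped at the end.
+ */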
+
+uint8_t dc_get_current_target_count(const struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return core_dc->current_context->target_count;
+}
+
+struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	if (i < core_dc->current_context->target_count)
+		return &(core_dc->current_context->targets[i]->public);
+	return NULL;
+}
+
+const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return &core_dc->links[link_index]->public;
+}
+
+const struct graphics_object_id dc_get_link_id_at_index(
+	struct dc *dc, uint32_t link_index)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return core_dc->links[link_index]->link_id;
+}
+
+const struct ddc_service *dc_get_ddc_at_index(
+	struct dc *dc, uint32_t link_index)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return core_dc->links[link_index]->ddc;
+}
+
+enum dc_irq_source dc_get_hpd_irq_source_at_index(
+	struct dc *dc, uint32_t link_index)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return core_dc->links[link_index]->public.irq_source_hpd;
+}
+
+const struct audio **dc_get_audios(struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return (const struct audio **)core_dc->res_pool->audios;
+}
+
+void dc_flip_surface_addrs(
+		struct dc *dc,
+		const struct dc_surface *const surfaces[],
+		struct dc_flip_addrs flip_addrs[],
+		uint32_t count)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	int i, j;
+
+	for (i = 0; i < count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
+
+		surface->public.address = flip_addrs[i].address;
+		surface->public.flip_immediate = flip_addrs[i].flip_immediate;
+
+		for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
+
+			if (pipe_ctx->surface != surface)
+				continue;
+
+			core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
+		}
+	}
+}
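+
+/* Hedged flip sketch; only the dc_flip_addrs members used above are
+ * assumed, the rest is illustrative:
+ *
+ *	struct dc_flip_addrs flip = { 0 };
+ *
+ *	flip.address = new_scanout_address;
+ *	flip.flip_immediate = false;	(latch at the next vupdate)
+ *	dc_flip_surface_addrs(dc, &surface, &flip, 1);
+ */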
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+		struct dc *dc,
+		uint32_t src_id,
+		uint32_t ext_id)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
+}
+
+void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	dal_irq_service_ack(core_dc->res_pool->irqs, src);
+}
+
+void dc_set_power_state(
+	struct dc *dc,
+	enum dc_acpi_cm_power_state power_state,
+	enum dc_video_power_state video_power_state)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+
+	core_dc->previous_power_state = core_dc->current_power_state;
+	core_dc->current_power_state = video_power_state;
+
+	switch (power_state) {
+	case DC_ACPI_CM_POWER_STATE_D0:
+		core_dc->hwss.init_hw(core_dc);
+		break;
+	default:
+		/* NULL means "reset/release all DC targets" */
+		dc_commit_targets(dc, NULL, 0);
+
+		core_dc->hwss.power_down(core_dc);
+
+		/* Zero out the current context so that on resume we start with
+		 * clean state, and dc hw programming optimizations will not
+		 * cause any trouble.
+		 */
+		memset(core_dc->current_context, 0,
+				sizeof(*core_dc->current_context));
+
+		core_dc->current_context->res_ctx.pool = core_dc->res_pool;
+
+		break;
+	}
+}
+
+void dc_resume(const struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+
+	uint32_t i;
+
+	for (i = 0; i < core_dc->link_count; i++)
+		core_link_resume(core_dc->links[i]);
+}
+
+bool dc_read_dpcd(
+		struct dc *dc,
+		uint32_t link_index,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+
+	struct core_link *link = core_dc->links[link_index];
+	enum ddc_result r = dal_ddc_service_read_dpcd_data(
+			link->ddc,
+			address,
+			data,
+			size);
+	return r == DDC_RESULT_SUCESSFULL;
+}
+
+bool dc_write_dpcd(
+		struct dc *dc,
+		uint32_t link_index,
+		uint32_t address,
+		const uint8_t *data,
+		uint32_t size)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+
+	struct core_link *link = core_dc->links[link_index];
+
+	enum ddc_result r = dal_ddc_service_write_dpcd_data(
+			link->ddc,
+			address,
+			data,
+			size);
+	return r == DDC_RESULT_SUCESSFULL;
+}
+
+bool dc_submit_i2c(
+		struct dc *dc,
+		uint32_t link_index,
+		struct i2c_command *cmd)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+
+	struct core_link *link = core_dc->links[link_index];
+	struct ddc_service *ddc = link->ddc;
+
+	return dal_i2caux_submit_i2c_command(
+		ddc->ctx->i2caux,
+		ddc->ddc_pin,
+		cmd);
+}
+
+static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
+{
+	struct dc_link *dc_link = &core_link->public;
+
+	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	dc_sink_retain(sink);
+
+	dc_link->remote_sinks[dc_link->sink_count] = sink;
+	dc_link->sink_count++;
+
+	return true;
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+		const struct dc_link *link,
+		const uint8_t *edid,
+		int len,
+		struct dc_sink_init_data *init_data)
+{
+	struct dc_sink *dc_sink;
+	enum dc_edid_status edid_status;
+	struct core_link *core_link = DC_LINK_TO_LINK(link);
+
+	if (len > MAX_EDID_BUFFER_SIZE) {
+		dm_error("Max EDID buffer size breached!\n");
+		return NULL;
+	}
+
+	if (!init_data) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	if (!init_data->link) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dc_sink = dc_sink_create(init_data);
+
+	if (!dc_sink)
+		return NULL;
+
+	memmove(dc_sink->dc_edid.raw_edid, edid, len);
+	dc_sink->dc_edid.length = len;
+
+	if (!link_add_remote_sink_helper(
+			core_link,
+			dc_sink))
+		goto fail_add_sink;
+
+	edid_status = dm_helpers_parse_edid_caps(
+			core_link->ctx,
+			&dc_sink->dc_edid,
+			&dc_sink->edid_caps);
+
+	if (edid_status != EDID_OK)
+		goto fail;
+
+	return dc_sink;
+fail:
+	dc_link_remove_remote_sink(link, dc_sink);
+fail_add_sink:
+	dc_sink_release(dc_sink);
+	return NULL;
+}
+
+void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
+{
+	struct core_link *core_link = DC_LINK_TO_LINK(link);
+	struct dc_link *dc_link = &core_link->public;
+
+	dc_link->local_sink = sink;
+
+	if (sink == NULL)
+		dc_link->type = dc_connection_none;
+	else
+		dc_link->type = dc_connection_single;
+}
+
+void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
+{
+	int i;
+	struct core_link *core_link = DC_LINK_TO_LINK(link);
+	struct dc_link *dc_link = &core_link->public;
+
+	if (!link->sink_count) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	for (i = 0; i < dc_link->sink_count; i++) {
+		if (dc_link->remote_sinks[i] == sink) {
+			dc_sink_release(sink);
+			dc_link->remote_sinks[i] = NULL;
+
+			/* shrink array to remove empty place */
+			while (i < dc_link->sink_count - 1) {
+				dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
+				i++;
+			}
+
+			dc_link->sink_count--;
+			return;
+		}
+	}
+}
+
+const struct dc_stream_status *dc_stream_get_status(
+	const struct dc_stream *dc_stream)
+{
+	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
+
+	return &stream->status;
+}
+
+bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
+{
+	int i;
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct mem_input *mi = NULL;
+
+	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+		if (core_dc->res_pool->mis[i] != NULL) {
+			mi = core_dc->res_pool->mis[i];
+			break;
+		}
+	}
+	if (mi == NULL) {
+		dm_error("no mem_input!\n");
+		return false;
+	}
+
+	if (mi->funcs->mem_input_update_dchub)
+		mi->funcs->mem_input_update_dchub(mi, dh_data);
+	else
+		ASSERT(mi->funcs->mem_input_update_dchub);
+
+	return true;
+}
+

+ 270 - 0
drivers/gpu/drm/amd/display/dc/core/dc_debug.c

@@ -0,0 +1,270 @@
+/*
+ * dc_debug.c
+ *
+ *  Created on: Nov 3, 2016
+ *      Author: yonsun
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#define SURFACE_TRACE(...) do {\
+		if (dc->debug.surface_trace) \
+			dm_logger_write(logger, \
+					LOG_IF_TRACE, \
+					##__VA_ARGS__); \
+} while (0)
+
+void pre_surface_trace(
+		const struct dc *dc,
+		const struct dc_surface *const *surfaces,
+		int surface_count)
+{
+	int i;
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct dal_logger *logger = core_dc->ctx->logger;
+
+	for (i = 0; i < surface_count; i++) {
+		const struct dc_surface *surface = surfaces[i];
+
+		SURFACE_TRACE("Surface %d:\n", i);
+
+		SURFACE_TRACE(
+				"surface->visible = %d;\n"
+				"surface->flip_immediate = %d;\n"
+				"surface->address.type = %d;\n"
+				"surface->address.grph.addr.quad_part = 0x%X;\n"
+				"surface->address.grph.meta_addr.quad_part = 0x%X;\n"
+				"surface->scaling_quality.h_taps = %d;\n"
+				"surface->scaling_quality.v_taps = %d;\n"
+				"surface->scaling_quality.h_taps_c = %d;\n"
+				"surface->scaling_quality.v_taps_c = %d;\n",
+				surface->visible,
+				surface->flip_immediate,
+				surface->address.type,
+				surface->address.grph.addr.quad_part,
+				surface->address.grph.meta_addr.quad_part,
+				surface->scaling_quality.h_taps,
+				surface->scaling_quality.v_taps,
+				surface->scaling_quality.h_taps_c,
+				surface->scaling_quality.v_taps_c);
+
+		SURFACE_TRACE(
+				"surface->src_rect.x = %d;\n"
+				"surface->src_rect.y = %d;\n"
+				"surface->src_rect.width = %d;\n"
+				"surface->src_rect.height = %d;\n"
+				"surface->dst_rect.x = %d;\n"
+				"surface->dst_rect.y = %d;\n"
+				"surface->dst_rect.width = %d;\n"
+				"surface->dst_rect.height = %d;\n"
+				"surface->clip_rect.x = %d;\n"
+				"surface->clip_rect.y = %d;\n"
+				"surface->clip_rect.width = %d;\n"
+				"surface->clip_rect.height = %d;\n",
+				surface->src_rect.x,
+				surface->src_rect.y,
+				surface->src_rect.width,
+				surface->src_rect.height,
+				surface->dst_rect.x,
+				surface->dst_rect.y,
+				surface->dst_rect.width,
+				surface->dst_rect.height,
+				surface->clip_rect.x,
+				surface->clip_rect.y,
+				surface->clip_rect.width,
+				surface->clip_rect.height);
+
+		SURFACE_TRACE(
+				"surface->plane_size.grph.surface_size.x = %d;\n"
+				"surface->plane_size.grph.surface_size.y = %d;\n"
+				"surface->plane_size.grph.surface_size.width = %d;\n"
+				"surface->plane_size.grph.surface_size.height = %d;\n"
+				"surface->plane_size.grph.surface_pitch = %d;\n"
+				"surface->plane_size.grph.meta_pitch = %d;\n",
+				surface->plane_size.grph.surface_size.x,
+				surface->plane_size.grph.surface_size.y,
+				surface->plane_size.grph.surface_size.width,
+				surface->plane_size.grph.surface_size.height,
+				surface->plane_size.grph.surface_pitch,
+				surface->plane_size.grph.meta_pitch);
+
+		SURFACE_TRACE(
+				"surface->tiling_info.gfx8.num_banks = %d;\n"
+				"surface->tiling_info.gfx8.bank_width = %d;\n"
+				"surface->tiling_info.gfx8.bank_width_c = %d;\n"
+				"surface->tiling_info.gfx8.bank_height = %d;\n"
+				"surface->tiling_info.gfx8.bank_height_c = %d;\n"
+				"surface->tiling_info.gfx8.tile_aspect = %d;\n"
+				"surface->tiling_info.gfx8.tile_aspect_c = %d;\n"
+				"surface->tiling_info.gfx8.tile_split = %d;\n"
+				"surface->tiling_info.gfx8.tile_split_c = %d;\n"
+				"surface->tiling_info.gfx8.tile_mode = %d;\n"
+				"surface->tiling_info.gfx8.tile_mode_c = %d;\n",
+				surface->tiling_info.gfx8.num_banks,
+				surface->tiling_info.gfx8.bank_width,
+				surface->tiling_info.gfx8.bank_width_c,
+				surface->tiling_info.gfx8.bank_height,
+				surface->tiling_info.gfx8.bank_height_c,
+				surface->tiling_info.gfx8.tile_aspect,
+				surface->tiling_info.gfx8.tile_aspect_c,
+				surface->tiling_info.gfx8.tile_split,
+				surface->tiling_info.gfx8.tile_split_c,
+				surface->tiling_info.gfx8.tile_mode,
+				surface->tiling_info.gfx8.tile_mode_c);
+
+		SURFACE_TRACE(
+				"surface->tiling_info.gfx8.pipe_config = %d;\n"
+				"surface->tiling_info.gfx8.array_mode = %d;\n"
+				"surface->color_space = %d;\n"
+				"surface->dcc.enable = %d;\n"
+				"surface->format = %d;\n"
+				"surface->rotation = %d;\n"
+				"surface->stereo_format = %d;\n",
+				surface->tiling_info.gfx8.pipe_config,
+				surface->tiling_info.gfx8.array_mode,
+				surface->color_space,
+				surface->dcc.enable,
+				surface->format,
+				surface->rotation,
+				surface->stereo_format);
+		SURFACE_TRACE("\n");
+	}
+	SURFACE_TRACE("\n");
+}
+
+void update_surface_trace(
+		const struct dc *dc,
+		const struct dc_surface_update *updates,
+		int surface_count)
+{
+	int i;
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct dal_logger *logger =  core_dc->ctx->logger;
+
+	for (i = 0; i < surface_count; i++) {
+		const struct dc_surface_update *update = &updates[i];
+
+		SURFACE_TRACE("Update %d\n", i);
+		if (update->flip_addr) {
+			SURFACE_TRACE("flip_addr->address.type = %d;\n"
+					"flip_addr->address.grph.addr.quad_part = 0x%llX;\n"
+					"flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n"
+					"flip_addr->flip_immediate = %d;\n",
+					update->flip_addr->address.type,
+					update->flip_addr->address.grph.addr.quad_part,
+					update->flip_addr->address.grph.meta_addr.quad_part,
+					update->flip_addr->flip_immediate);
+		}
+
+		if (update->plane_info) {
+			SURFACE_TRACE(
+					"plane_info->color_space = %d;\n"
+					"plane_info->format = %d;\n"
+					"plane_info->plane_size.grph.meta_pitch = %d;\n"
+					"plane_info->plane_size.grph.surface_pitch = %d;\n"
+					"plane_info->plane_size.grph.surface_size.height = %d;\n"
+					"plane_info->plane_size.grph.surface_size.width = %d;\n"
+					"plane_info->plane_size.grph.surface_size.x = %d;\n"
+					"plane_info->plane_size.grph.surface_size.y = %d;\n"
+					"plane_info->rotation = %d;\n"
+					"plane_info->stereo_format = %d;\n",
+					update->plane_info->color_space,
+					update->plane_info->format,
+					update->plane_info->plane_size.grph.meta_pitch,
+					update->plane_info->plane_size.grph.surface_pitch,
+					update->plane_info->plane_size.grph.surface_size.height,
+					update->plane_info->plane_size.grph.surface_size.width,
+					update->plane_info->plane_size.grph.surface_size.x,
+					update->plane_info->plane_size.grph.surface_size.y,
+					update->plane_info->rotation,
+					update->plane_info->stereo_format);
+
+			SURFACE_TRACE(
+					"plane_info->tiling_info.gfx8.num_banks = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_width = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_width_c = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_height = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_height_c = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_aspect = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_split = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_split_c = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_mode = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_mode_c = %d;\n",
+					update->plane_info->tiling_info.gfx8.num_banks,
+					update->plane_info->tiling_info.gfx8.bank_width,
+					update->plane_info->tiling_info.gfx8.bank_width_c,
+					update->plane_info->tiling_info.gfx8.bank_height,
+					update->plane_info->tiling_info.gfx8.bank_height_c,
+					update->plane_info->tiling_info.gfx8.tile_aspect,
+					update->plane_info->tiling_info.gfx8.tile_aspect_c,
+					update->plane_info->tiling_info.gfx8.tile_split,
+					update->plane_info->tiling_info.gfx8.tile_split_c,
+					update->plane_info->tiling_info.gfx8.tile_mode,
+					update->plane_info->tiling_info.gfx8.tile_mode_c);
+
+			SURFACE_TRACE(
+					"plane_info->tiling_info.gfx8.pipe_config = %d;\n"
+					"plane_info->tiling_info.gfx8.array_mode = %d;\n"
+					"plane_info->visible = %d;\n",
+					update->plane_info->tiling_info.gfx8.pipe_config,
+					update->plane_info->tiling_info.gfx8.array_mode,
+					update->plane_info->visible);
+		}
+
+		if (update->scaling_info) {
+			SURFACE_TRACE(
+					"scaling_info->src_rect.x = %d;\n"
+					"scaling_info->src_rect.y = %d;\n"
+					"scaling_info->src_rect.width = %d;\n"
+					"scaling_info->src_rect.height = %d;\n"
+					"scaling_info->dst_rect.x = %d;\n"
+					"scaling_info->dst_rect.y = %d;\n"
+					"scaling_info->dst_rect.width = %d;\n"
+					"scaling_info->dst_rect.height = %d;\n"
+					"scaling_info->clip_rect.x = %d;\n"
+					"scaling_info->clip_rect.y = %d;\n"
+					"scaling_info->clip_rect.width = %d;\n"
+					"scaling_info->clip_rect.height = %d;\n"
+					"scaling_info->scaling_quality.h_taps = %d;\n"
+					"scaling_info->scaling_quality.v_taps = %d;\n"
+					"scaling_info->scaling_quality.h_taps_c = %d;\n"
+					"scaling_info->scaling_quality.v_taps_c = %d;\n",
+					update->scaling_info->src_rect.x,
+					update->scaling_info->src_rect.y,
+					update->scaling_info->src_rect.width,
+					update->scaling_info->src_rect.height,
+					update->scaling_info->dst_rect.x,
+					update->scaling_info->dst_rect.y,
+					update->scaling_info->dst_rect.width,
+					update->scaling_info->dst_rect.height,
+					update->scaling_info->clip_rect.x,
+					update->scaling_info->clip_rect.y,
+					update->scaling_info->clip_rect.width,
+					update->scaling_info->clip_rect.height,
+					update->scaling_info->scaling_quality.h_taps,
+					update->scaling_info->scaling_quality.v_taps,
+					update->scaling_info->scaling_quality.h_taps_c,
+					update->scaling_info->scaling_quality.v_taps_c);
+		}
+		SURFACE_TRACE("\n");
+	}
+	SURFACE_TRACE("\n");
+}
+
+void post_surface_trace(const struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+	struct dal_logger *logger =  core_dc->ctx->logger;
+
+	SURFACE_TRACE("post surface process.\n");
+}
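
Note the output format: each SURFACE_TRACE call above prints state as compilable C assignments, so a captured log can be pasted into a test harness to replay the exact surface programming. A representative excerpt of the emitted output (values are illustrative):

	surface->src_rect.x = 0;
	surface->src_rect.y = 0;
	surface->src_rect.width = 1920;
	surface->src_rect.height = 1080;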

+ 93 - 0
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c

@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "core_dc.h"
+#include "timing_generator.h"
+#include "hw_sequencer.h"
+
+/* used as index in array of black_color_format */
+enum black_color_format {
+	BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
+	BLACK_COLOR_FORMAT_RGB_LIMITED,
+	BLACK_COLOR_FORMAT_YUV_TV,
+	BLACK_COLOR_FORMAT_YUV_CV,
+	BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+	BLACK_COLOR_FORMAT_DEBUG,
+};
+
+static const struct tg_color black_color_format[] = {
+	/* BlackColorFormat_RGB_FullRange */
+	{0, 0, 0},
+	/* BlackColorFormat_RGB_Limited */
+	{0x40, 0x40, 0x40},
+	/* BlackColorFormat_YUV_TV */
+	{0x200, 0x40, 0x200},
+	/* BlackColorFormat_YUV_CV */
+	{0x1f4, 0x40, 0x1f4},
+	/* BlackColorFormat_YUV_SuperAA */
+	{0x1a2, 0x20, 0x1a2},
+	/* visual confirm debug */
+	{0xff, 0xff, 0},
+};
+
+void color_space_to_black_color(
+	const struct core_dc *dc,
+	enum dc_color_space colorspace,
+	struct tg_color *black_color)
+{
+	if (dc->public.debug.surface_visual_confirm) {
+		*black_color =
+			black_color_format[BLACK_COLOR_FORMAT_DEBUG];
+		return;
+	}
+
+	switch (colorspace) {
+	case COLOR_SPACE_YPBPR601:
+		*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_TV];
+		break;
+
+	case COLOR_SPACE_YPBPR709:
+	case COLOR_SPACE_YCBCR601:
+	case COLOR_SPACE_YCBCR709:
+	case COLOR_SPACE_YCBCR601_LIMITED:
+	case COLOR_SPACE_YCBCR709_LIMITED:
+		*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+		break;
+
+	case COLOR_SPACE_SRGB_LIMITED:
+		*black_color =
+			black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
+		break;
+
+	default:
+		/* default is sRGB black (full range) */
+		*black_color =
+			black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
+		break;
+	}
+}
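
For context, color_space_to_black_color() is consumed by the HW sequencer when blanking a pipe. A minimal sketch of such a caller, assuming the timing generator exposes a set_blank_color() hook (the caller name is illustrative, not part of this patch):

	/* Blank a pipe to the colorimetrically correct black for its
	 * output color space. */
	static void blank_pipe_to_black(struct core_dc *dc,
			struct pipe_ctx *pipe_ctx,
			enum dc_color_space colorspace)
	{
		struct tg_color black_color = {0};

		color_space_to_black_color(dc, colorspace, &black_color);
		pipe_ctx->tg->funcs->set_blank_color(pipe_ctx->tg,
				&black_color);
	}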

+ 1899 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link.c

@@ -0,0 +1,1899 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "dc.h"
+#include "core_dc.h"
+#include "grph_object_id.h"
+#include "gpio_service_interface.h"
+#include "core_status.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+#include "stream_encoder.h"
+#include "link_encoder.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "fixed31_32.h"
+#include "include/asic_capability_interface.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_enum.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#ifndef mmDMCU_STATUS__UC_IN_RESET__SHIFT
+#define mmDMCU_STATUS__UC_IN_RESET__SHIFT 0x0
+#endif
+
+#ifndef mmDMCU_STATUS__UC_IN_RESET_MASK
+#define mmDMCU_STATUS__UC_IN_RESET_MASK 0x00000001L
+#endif
+
+#define LINK_INFO(...) \
+	dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
+		__VA_ARGS__)
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+enum {
+	LINK_RATE_REF_FREQ_IN_MHZ = 27,
+	PEAK_FACTOR_X1000 = 1006
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destruct(struct core_link *link)
+{
+	int i;
+
+	if (link->ddc)
+		dal_ddc_service_destroy(&link->ddc);
+
+	if (link->link_enc)
+		link->link_enc->funcs->destroy(&link->link_enc);
+
+	if (link->public.local_sink)
+		dc_sink_release(link->public.local_sink);
+
+	for (i = 0; i < link->public.sink_count; ++i)
+		dc_sink_release(link->public.remote_sinks[i]);
+}
+
+static struct gpio *get_hpd_gpio(const struct core_link *link)
+{
+	enum bp_result bp_result;
+	struct dc_bios *dcb = link->ctx->dc_bios;
+	struct graphics_object_hpd_info hpd_info;
+	struct gpio_pin_info pin_info;
+
+	if (dcb->funcs->get_hpd_info(dcb, link->link_id, &hpd_info) != BP_RESULT_OK)
+		return NULL;
+
+	bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+		hpd_info.hpd_int_gpio_uid, &pin_info);
+
+	if (bp_result != BP_RESULT_OK) {
+		ASSERT(bp_result == BP_RESULT_NORECORD);
+		return NULL;
+	}
+
+	return dal_gpio_service_create_irq(
+		link->ctx->gpio_service,
+		pin_info.offset,
+		pin_info.mask);
+}
+
+/*
+ *  Function: program_hpd_filter
+ *
+ *  @brief
+ *     Programs the HPD filter on the associated HPD line. The connect and
+ *     disconnect debounce timeouts are chosen below based on the
+ *     connector's signal type.
+ *
+ *  @return
+ *     true on success, false otherwise
+ */
+static bool program_hpd_filter(
+	const struct core_link *link)
+{
+	bool result = false;
+
+	struct gpio *hpd;
+
+	int delay_on_connect_in_ms = 0;
+	int delay_on_disconnect_in_ms = 0;
+
+	/* Verify feature is supported */
+	switch (link->public.connector_signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* Program hpd filter */
+		delay_on_connect_in_ms = 500;
+		delay_on_disconnect_in_ms = 100;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* Program hpd filter to allow DP signal to settle */
+		/* 500:  unable to detect an MST <-> SST switch, as HPD is low
+		 *       for only 100ms on a DELL U2413
+		 * 0:    some passive dongles still show aux mode instead of i2c
+		 * 20-50: not enough to hide bouncing HPD with passive dongles;
+		 *       intermittent i2c read issues are also seen
+		 */
+		delay_on_connect_in_ms = 80;
+		delay_on_disconnect_in_ms = 0;
+		break;
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_EDP:
+	default:
+		/* Don't program hpd filter */
+		return false;
+	}
+
+	/* Obtain HPD handle */
+	hpd = get_hpd_gpio(link);
+
+	if (!hpd)
+		return result;
+
+	/* Setup HPD filtering */
+	if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+		struct gpio_hpd_config config;
+
+		config.delay_on_connect = delay_on_connect_in_ms;
+		config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+		dal_irq_setup_hpd_filter(hpd, &config);
+
+		dal_gpio_close(hpd);
+
+		result = true;
+	} else {
+		ASSERT_CRITICAL(false);
+	}
+
+	/* Release HPD handle */
+	dal_gpio_destroy_irq(&hpd);
+
+	return result;
+}
+
+static bool detect_sink(struct core_link *link, enum dc_connection_type *type)
+{
+	uint32_t is_hpd_high = 0;
+	struct gpio *hpd_pin;
+
+	/* todo: may need to lock gpio access */
+	hpd_pin = get_hpd_gpio(link);
+	if (hpd_pin == NULL)
+		goto hpd_gpio_failure;
+
+	dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
+	dal_gpio_get_value(hpd_pin, &is_hpd_high);
+	dal_gpio_close(hpd_pin);
+	dal_gpio_destroy_irq(&hpd_pin);
+
+	if (is_hpd_high) {
+		*type = dc_connection_single;
+		/* TODO: need to do the actual detection */
+	} else {
+		*type = dc_connection_none;
+	}
+
+	return true;
+
+hpd_gpio_failure:
+	return false;
+}
+
+enum ddc_transaction_type get_ddc_transaction_type(
+		enum signal_type sink_signal)
+{
+	enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+	switch (sink_signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_RGB:
+		transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		break;
+
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		break;
+
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* MST does not use I2C-over-AUX, but there is the
+		 * SPECIAL case of "immediate downstream device
+		 * access" (EPR#370830). */
+		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		break;
+
+	default:
+		break;
+	}
+
+	return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(
+	struct graphics_object_id encoder,
+	struct graphics_object_id downstream)
+{
+	if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+		switch (downstream.id) {
+		case CONNECTOR_ID_SINGLE_LINK_DVII:
+			switch (encoder.id) {
+			case ENCODER_ID_INTERNAL_DAC1:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+			case ENCODER_ID_INTERNAL_DAC2:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+				return SIGNAL_TYPE_RGB;
+			default:
+				return SIGNAL_TYPE_DVI_SINGLE_LINK;
+			}
+		break;
+		case CONNECTOR_ID_DUAL_LINK_DVII:
+		{
+			switch (encoder.id) {
+			case ENCODER_ID_INTERNAL_DAC1:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+			case ENCODER_ID_INTERNAL_DAC2:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+				return SIGNAL_TYPE_RGB;
+			default:
+				return SIGNAL_TYPE_DVI_DUAL_LINK;
+			}
+		}
+		break;
+		case CONNECTOR_ID_SINGLE_LINK_DVID:
+			return SIGNAL_TYPE_DVI_SINGLE_LINK;
+		case CONNECTOR_ID_DUAL_LINK_DVID:
+			return SIGNAL_TYPE_DVI_DUAL_LINK;
+		case CONNECTOR_ID_VGA:
+			return SIGNAL_TYPE_RGB;
+		case CONNECTOR_ID_HDMI_TYPE_A:
+			return SIGNAL_TYPE_HDMI_TYPE_A;
+		case CONNECTOR_ID_LVDS:
+			return SIGNAL_TYPE_LVDS;
+		case CONNECTOR_ID_DISPLAY_PORT:
+			return SIGNAL_TYPE_DISPLAY_PORT;
+		case CONNECTOR_ID_EDP:
+			return SIGNAL_TYPE_EDP;
+		default:
+			return SIGNAL_TYPE_NONE;
+		}
+	} else if (downstream.type == OBJECT_TYPE_ENCODER) {
+		switch (downstream.id) {
+		case ENCODER_ID_EXTERNAL_NUTMEG:
+		case ENCODER_ID_EXTERNAL_TRAVIS:
+			return SIGNAL_TYPE_DISPLAY_PORT;
+		default:
+			return SIGNAL_TYPE_NONE;
+		}
+	}
+
+	return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Check whether there is a dongle on DP connector
+ */
+static bool is_dp_sink_present(struct core_link *link)
+{
+	enum gpio_result gpio_result;
+	uint32_t clock_pin = 0;
+	uint32_t data_pin = 0;
+
+	struct ddc *ddc;
+
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(link->link_id);
+
+	bool present =
+		((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+		(connector_id == CONNECTOR_ID_EDP));
+
+	ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+	if (!ddc) {
+		BREAK_TO_DEBUGGER();
+		return present;
+	}
+
+	/* Open GPIO and set it to I2C mode */
+	/* Note: this GpioMode_Input will be converted
+	 * to GpioConfigType_I2cAuxDualMode in GPIO component,
+	 * which indicates we need additional delay */
+
+	if (GPIO_RESULT_OK != dal_ddc_open(
+		ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+		dal_gpio_destroy_ddc(&ddc);
+
+		return present;
+	}
+
+	/* Read GPIO: DP sink is present if both clock and data pins are zero */
+	/* [anaumov] in DAL2, there was no check for GPIO failure */
+
+	gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+	ASSERT(gpio_result == GPIO_RESULT_OK);
+
+	if (gpio_result == GPIO_RESULT_OK &&
+			link->link_enc->features.flags.bits.
+					DP_SINK_DETECT_POLL_DATA_PIN)
+		gpio_result = dal_gpio_get_value(ddc->pin_data, &data_pin);
+
+	present = (gpio_result == GPIO_RESULT_OK) && !(clock_pin || data_pin);
+
+	dal_ddc_close(ddc);
+
+	return present;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink(struct core_link *link)
+{
+	enum signal_type result = get_basic_signal_type(
+		link->link_enc->id, link->link_id);
+
+	/* Internal digital encoder will detect only dongles
+	 * that require digital signal */
+
+	/* Detection mechanism is different
+	 * for different native connectors.
+	 * LVDS connector supports only LVDS signal;
+	 * PCIE is a bus slot, the actual connector needs to be detected first;
+	 * eDP connector supports only eDP signal;
+	 * HDMI should check straps for audio */
+
+	/* PCIE detects the actual connector on add-on board */
+
+	if (link->link_id.id == CONNECTOR_ID_PCIE) {
+		/* ZAZTODO implement PCIE add-on card detection */
+	}
+
+	switch (link->link_id.id) {
+	case CONNECTOR_ID_HDMI_TYPE_A: {
+		/* check audio support:
+		 * if native HDMI is not supported, switch to DVI */
+		struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+
+		if (!aud_support->hdmi_audio_native)
+			result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+	break;
+	case CONNECTOR_ID_DISPLAY_PORT: {
+
+		/* Check whether DP signal detected: if not -
+		 * we assume signal is DVI; it could be corrected
+		 * to HDMI after dongle detection */
+		if (!is_dp_sink_present(link))
+			result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+	break;
+	default:
+	break;
+	}
+
+	return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(
+		enum display_dongle_type dongle_type,
+		struct audio_support *audio_support)
+{
+	enum signal_type signal = SIGNAL_TYPE_NONE;
+
+	switch (dongle_type) {
+	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+		if (audio_support->hdmi_audio_on_dongle)
+			signal =  SIGNAL_TYPE_HDMI_TYPE_A;
+		else
+			signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case DISPLAY_DONGLE_DP_DVI_DONGLE:
+		signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+		if (audio_support->hdmi_audio_native)
+			signal =  SIGNAL_TYPE_HDMI_TYPE_A;
+		else
+			signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	default:
+		signal = SIGNAL_TYPE_NONE;
+		break;
+	}
+
+	return signal;
+}
+
+static enum signal_type dp_passive_dongle_detection(
+		struct ddc_service *ddc,
+		struct display_sink_capability *sink_cap,
+		struct audio_support *audio_support)
+{
+	dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+						ddc, sink_cap);
+	return decide_signal_from_strap_and_dongle_type(
+			sink_cap->dongle_type,
+			audio_support);
+}
+
+static void link_disconnect_sink(struct core_link *link)
+{
+	if (link->public.local_sink) {
+		dc_sink_release(link->public.local_sink);
+		link->public.local_sink = NULL;
+	}
+
+	link->dpcd_sink_count = 0;
+}
+
+static enum dc_edid_status read_edid(
+	struct core_link *link,
+	struct core_sink *sink)
+{
+	uint32_t edid_retry = 3;
+	enum dc_edid_status edid_status;
+
+	/* Some dongles read the EDID incorrectly the first time;
+	 * verify the checksum and retry to make sure a correct EDID is read.
+	 */
+	do {
+		sink->public.dc_edid.length =
+				dal_ddc_service_edid_query(link->ddc);
+
+		if (0 == sink->public.dc_edid.length)
+			return EDID_NO_RESPONSE;
+
+		dal_ddc_service_get_edid_buf(link->ddc,
+				sink->public.dc_edid.raw_edid);
+		edid_status = dm_helpers_parse_edid_caps(
+				sink->ctx,
+				&sink->public.dc_edid,
+				&sink->public.edid_caps);
+		--edid_retry;
+		if (edid_status == EDID_BAD_CHECKSUM)
+			dm_logger_write(link->ctx->logger, LOG_WARNING,
+					"Bad EDID checksum, retry remain: %d\n",
+					edid_retry);
+	} while (edid_status == EDID_BAD_CHECKSUM && edid_retry > 0);
+
+	return edid_status;
+}
+
+static void detect_dp(
+	struct core_link *link,
+	struct display_sink_capability *sink_caps,
+	bool *converter_disable_audio,
+	struct audio_support *audio_support,
+	bool boot)
+{
+	sink_caps->signal = link_detect_sink(link);
+	sink_caps->transaction_type =
+		get_ddc_transaction_type(sink_caps->signal);
+
+	if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+		detect_dp_sink_caps(link);
+
+		/* DP active dongles */
+		if (is_dp_active_dongle(link)) {
+			link->public.type = dc_connection_active_dongle;
+			if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+				/*
+				 * active dongle unplug processing for short irq
+				 */
+				link_disconnect_sink(link);
+				return;
+			}
+
+			if (link->dpcd_caps.dongle_type !=
+			DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+				*converter_disable_audio = true;
+			}
+		}
+		if (is_mst_supported(link)) {
+			sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+			/*
+			 * This call initiates MST topology discovery, which
+			 * detects MST ports, adds new DRM connectors to the
+			 * DRM framework, and then reads the EDID via remote
+			 * i2c over aux. Finally it notifies DRM of the detect
+			 * result and saves the EDID in the DRM framework.
+			 *
+			 * .detect is called by .fill_modes.
+			 * .fill_modes is called by the user mode ioctl
+			 * DRM_IOCTL_MODE_GETCONNECTOR.
+			 *
+			 * .get_modes is called by .fill_modes.
+			 *
+			 * When .get_modes is called, the amdgpu DM
+			 * implementation creates a new dc_sink and adds it to
+			 * the dc_link. Long HPD plug in/out is handled by MST
+			 * itself.
+			 *
+			 * Therefore, just after dc_create, link->sink is not
+			 * created for MST until a user mode app calls
+			 * DRM_IOCTL_MODE_GETCONNECTOR.
+			 *
+			 * Need to check ->sink usages in case ->sink == NULL.
+			 * TODO: s3 resume check
+			 */
+
+			if (dm_helpers_dp_mst_start_top_mgr(
+				link->ctx,
+				&link->public, boot)) {
+				link->public.type = dc_connection_mst_branch;
+			} else {
+				/* MST not supported */
+				sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+			}
+		}
+	} else {
+		/* DP passive dongles */
+		sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+				sink_caps,
+				audio_support);
+	}
+}
+
+bool dc_link_detect(const struct dc_link *dc_link, bool boot)
+{
+	struct core_link *link = DC_LINK_TO_LINK(dc_link);
+	struct dc_sink_init_data sink_init_data = { 0 };
+	struct display_sink_capability sink_caps = { 0 };
+	uint8_t i;
+	bool converter_disable_audio = false;
+	struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+	enum dc_edid_status edid_status;
+	struct dc_context *dc_ctx = link->ctx;
+	struct dc_sink *dc_sink;
+	struct core_sink *sink = NULL;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+
+	if (link->public.connector_signal == SIGNAL_TYPE_VIRTUAL)
+		return false;
+
+	if (false == detect_sink(link, &new_connection_type)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	if (link->public.connector_signal == SIGNAL_TYPE_EDP &&
+			link->public.local_sink)
+		return true;
+
+	link_disconnect_sink(link);
+
+	if (new_connection_type != dc_connection_none) {
+		link->public.type = new_connection_type;
+
+		/* From Disconnected-to-Connected. */
+		switch (link->public.connector_signal) {
+		case SIGNAL_TYPE_HDMI_TYPE_A: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			if (aud_support->hdmi_audio_native)
+				sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+			else
+				sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_DVI_DUAL_LINK: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_EDP: {
+			detect_dp_sink_caps(link);
+			sink_caps.transaction_type =
+				DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+			sink_caps.signal = SIGNAL_TYPE_EDP;
+			break;
+		}
+
+		case SIGNAL_TYPE_DISPLAY_PORT: {
+			detect_dp(
+				link,
+				&sink_caps,
+				&converter_disable_audio,
+				aud_support, boot);
+
+			/* Active dongle downstream unplug */
+			if (link->public.type == dc_connection_active_dongle
+					&& link->dpcd_caps.sink_count.
+					bits.SINK_COUNT == 0)
+				return true;
+
+			if (link->public.type == dc_connection_mst_branch) {
+				LINK_INFO("link=%d, mst branch is now Connected\n",
+					link->public.link_index);
+				return false;
+			}
+
+			break;
+		}
+
+		default:
+			DC_ERROR("Invalid connector type! signal:%d\n",
+				link->public.connector_signal);
+			return false;
+		} /* switch() */
+
+		if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+			link->dpcd_sink_count = link->dpcd_caps.sink_count.
+					bits.SINK_COUNT;
+		else
+			link->dpcd_sink_count = 1;
+
+		dal_ddc_service_set_transaction_type(
+						link->ddc,
+						sink_caps.transaction_type);
+
+		sink_init_data.link = &link->public;
+		sink_init_data.sink_signal = sink_caps.signal;
+		sink_init_data.dongle_max_pix_clk =
+			sink_caps.max_hdmi_pixel_clock;
+		sink_init_data.converter_disable_audio =
+			converter_disable_audio;
+
+		dc_sink = dc_sink_create(&sink_init_data);
+		if (!dc_sink) {
+			DC_ERROR("Failed to create sink!\n");
+			return false;
+		}
+
+		sink = DC_SINK_TO_CORE(dc_sink);
+		link->public.local_sink = &sink->public;
+
+		edid_status = read_edid(link, sink);
+
+		switch (edid_status) {
+		case EDID_BAD_CHECKSUM:
+			dm_logger_write(link->ctx->logger, LOG_ERROR,
+				"EDID checksum invalid.\n");
+			break;
+		case EDID_NO_RESPONSE:
+			dm_logger_write(link->ctx->logger, LOG_ERROR,
+				"No EDID read.\n");
+			return false;
+
+		default:
+			break;
+		}
+
+		/* HDMI-DVI Dongle */
+		if (dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+				!dc_sink->edid_caps.edid_hdmi)
+			dc_sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+		/* Connectivity log: detection */
+		for (i = 0; i < sink->public.dc_edid.length / EDID_BLOCK_SIZE; i++) {
+			CONN_DATA_DETECT(link,
+					&sink->public.dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
+					EDID_BLOCK_SIZE,
+					"%s: [Block %d] ", sink->public.edid_caps.display_name, i);
+		}
+
+		dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+			"%s: "
+			"manufacturer_id = %X, "
+			"product_id = %X, "
+			"serial_number = %X, "
+			"manufacture_week = %d, "
+			"manufacture_year = %d, "
+			"display_name = %s, "
+			"speaker_flag = %d, "
+			"audio_mode_count = %d\n",
+			__func__,
+			sink->public.edid_caps.manufacturer_id,
+			sink->public.edid_caps.product_id,
+			sink->public.edid_caps.serial_number,
+			sink->public.edid_caps.manufacture_week,
+			sink->public.edid_caps.manufacture_year,
+			sink->public.edid_caps.display_name,
+			sink->public.edid_caps.speaker_flags,
+			sink->public.edid_caps.audio_mode_count);
+
+		for (i = 0; i < sink->public.edid_caps.audio_mode_count; i++) {
+			dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+				"%s: mode number = %d, "
+				"format_code = %d, "
+				"channel_count = %d, "
+				"sample_rate = %d, "
+				"sample_size = %d\n",
+				__func__,
+				i,
+				sink->public.edid_caps.audio_modes[i].format_code,
+				sink->public.edid_caps.audio_modes[i].channel_count,
+				sink->public.edid_caps.audio_modes[i].sample_rate,
+				sink->public.edid_caps.audio_modes[i].sample_size);
+		}
+
+	} else {
+		/* From Connected-to-Disconnected. */
+		if (link->public.type == dc_connection_mst_branch) {
+			LINK_INFO("link=%d, mst branch is now Disconnected\n",
+				link->public.link_index);
+			dm_helpers_dp_mst_stop_top_mgr(link->ctx, &link->public);
+
+			link->mst_stream_alloc_table.stream_count = 0;
+			memset(link->mst_stream_alloc_table.stream_allocations,
+				0,
+				sizeof(link->mst_stream_alloc_table.stream_allocations));
+		}
+
+		link->public.type = dc_connection_none;
+		sink_caps.signal = SIGNAL_TYPE_NONE;
+	}
+
+	LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+		link->public.link_index,
+		sink == NULL ? NULL : &sink->public,
+		(sink_caps.signal == SIGNAL_TYPE_NONE ?
+			"Disconnected" : "Connected"));
+
+	return true;
+}
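
In the DM layer, dc_link_detect() is typically re-driven from the HPD interrupt path. A minimal sketch of such a caller (the handler name and parameters are illustrative, not part of this patch; drm_kms_helper_hotplug_event() is the stock DRM helper):

	/* Hypothetical HPD work handler: re-run link detection, then ask
	 * DRM user space to re-probe connectors. */
	static void handle_hpd(struct dc_link *link, struct drm_device *dev)
	{
		if (dc_link_detect(link, false))
			drm_kms_helper_hotplug_event(dev);
	}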
+
+static enum hpd_source_id get_hpd_line(
+		struct core_link *link)
+{
+	struct gpio *hpd;
+	enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
+
+	hpd = get_hpd_gpio(link);
+
+	if (hpd) {
+		switch (dal_irq_get_source(hpd)) {
+		case DC_IRQ_SOURCE_HPD1:
+			hpd_id = HPD_SOURCEID1;
+		break;
+		case DC_IRQ_SOURCE_HPD2:
+			hpd_id = HPD_SOURCEID2;
+		break;
+		case DC_IRQ_SOURCE_HPD3:
+			hpd_id = HPD_SOURCEID3;
+		break;
+		case DC_IRQ_SOURCE_HPD4:
+			hpd_id = HPD_SOURCEID4;
+		break;
+		case DC_IRQ_SOURCE_HPD5:
+			hpd_id = HPD_SOURCEID5;
+		break;
+		case DC_IRQ_SOURCE_HPD6:
+			hpd_id = HPD_SOURCEID6;
+		break;
+		default:
+			BREAK_TO_DEBUGGER();
+		break;
+		}
+
+		dal_gpio_destroy_irq(&hpd);
+	}
+
+	return hpd_id;
+}
+
+static enum channel_id get_ddc_line(struct core_link *link)
+{
+	struct ddc *ddc;
+	enum channel_id channel = CHANNEL_ID_UNKNOWN;
+
+	ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+	if (ddc) {
+		switch (dal_ddc_get_line(ddc)) {
+		case GPIO_DDC_LINE_DDC1:
+			channel = CHANNEL_ID_DDC1;
+			break;
+		case GPIO_DDC_LINE_DDC2:
+			channel = CHANNEL_ID_DDC2;
+			break;
+		case GPIO_DDC_LINE_DDC3:
+			channel = CHANNEL_ID_DDC3;
+			break;
+		case GPIO_DDC_LINE_DDC4:
+			channel = CHANNEL_ID_DDC4;
+			break;
+		case GPIO_DDC_LINE_DDC5:
+			channel = CHANNEL_ID_DDC5;
+			break;
+		case GPIO_DDC_LINE_DDC6:
+			channel = CHANNEL_ID_DDC6;
+			break;
+		case GPIO_DDC_LINE_DDC_VGA:
+			channel = CHANNEL_ID_DDC_VGA;
+			break;
+		case GPIO_DDC_LINE_I2C_PAD:
+			channel = CHANNEL_ID_I2C_PAD;
+			break;
+		default:
+			BREAK_TO_DEBUGGER();
+			break;
+		}
+	}
+
+	return channel;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+	struct graphics_object_id encoder)
+{
+	switch (encoder.id) {
+	case ENCODER_ID_INTERNAL_UNIPHY:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_A;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_B;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY1:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_C;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_D;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY2:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_E;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_F;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY3:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_G;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_NUTMEG_CRT;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_TRAVIS_CRT;
+		case ENUM_ID_2:
+			return TRANSMITTER_TRAVIS_LCD;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	default:
+		return TRANSMITTER_UNKNOWN;
+	}
+}
+
+static bool construct(
+	struct core_link *link,
+	const struct link_init_data *init_params)
+{
+	uint8_t i;
+	struct gpio *hpd_gpio = NULL;
+	struct ddc_service_init_data ddc_service_init_data = { 0 };
+	struct dc_context *dc_ctx = init_params->ctx;
+	struct encoder_init_data enc_init_data = { 0 };
+	struct integrated_info info = {{{ 0 }}};
+	struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+	const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
+	link->public.irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+	link->public.irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+
+	link->link_status.dpcd_caps = &link->dpcd_caps;
+
+	link->dc = init_params->dc;
+	link->ctx = dc_ctx;
+	link->public.link_index = init_params->link_index;
+
+	link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+	if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+		dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+				__func__, init_params->connector_index);
+		goto create_fail;
+	}
+
+	hpd_gpio = get_hpd_gpio(link);
+
+	if (hpd_gpio != NULL)
+		link->public.irq_source_hpd = dal_irq_get_source(hpd_gpio);
+
+	switch (link->link_id.id) {
+	case CONNECTOR_ID_HDMI_TYPE_A:
+		link->public.connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+		break;
+	case CONNECTOR_ID_SINGLE_LINK_DVID:
+	case CONNECTOR_ID_SINGLE_LINK_DVII:
+		link->public.connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case CONNECTOR_ID_DUAL_LINK_DVID:
+	case CONNECTOR_ID_DUAL_LINK_DVII:
+		link->public.connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		break;
+	case CONNECTOR_ID_DISPLAY_PORT:
+		link->public.connector_signal =	SIGNAL_TYPE_DISPLAY_PORT;
+
+		if (hpd_gpio != NULL)
+			link->public.irq_source_hpd_rx =
+					dal_irq_get_rx_source(hpd_gpio);
+
+		break;
+	case CONNECTOR_ID_EDP:
+		link->public.connector_signal = SIGNAL_TYPE_EDP;
+
+		if (hpd_gpio != NULL) {
+			link->public.irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+			link->public.irq_source_hpd_rx =
+					dal_irq_get_rx_source(hpd_gpio);
+		}
+		break;
+	default:
+		dm_logger_write(dc_ctx->logger, LOG_WARNING,
+			"Unsupported Connector type:%d!\n", link->link_id.id);
+		goto create_fail;
+	}
+
+	if (hpd_gpio != NULL) {
+		dal_gpio_destroy_irq(&hpd_gpio);
+		hpd_gpio = NULL;
+	}
+
+	/* TODO: #DAL3 Implement id to str function.*/
+	LINK_INFO("Connector[%d] description: "
+			"signal %d\n",
+			init_params->connector_index,
+			link->public.connector_signal);
+
+	ddc_service_init_data.ctx = link->ctx;
+	ddc_service_init_data.id = link->link_id;
+	ddc_service_init_data.link = link;
+	link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+
+	if (NULL == link->ddc) {
+		DC_ERROR("Failed to create ddc_service!\n");
+		goto ddc_create_fail;
+	}
+
+	link->public.ddc_hw_inst =
+		dal_ddc_get_line(
+			dal_ddc_service_get_ddc_pin(link->ddc));
+
+	enc_init_data.ctx = dc_ctx;
+	bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+	enc_init_data.connector = link->link_id;
+	enc_init_data.channel = get_ddc_line(link);
+	enc_init_data.hpd_source = get_hpd_line(link);
+	enc_init_data.transmitter =
+			translate_encoder_to_transmitter(enc_init_data.encoder);
+	link->link_enc = link->dc->res_pool->funcs->link_enc_create(
+								&enc_init_data);
+
+	if (link->link_enc == NULL) {
+		DC_ERROR("Failed to create link encoder!\n");
+		goto link_enc_create_fail;
+	}
+
+	link->public.link_enc_hw_inst = link->link_enc->transmitter;
+
+	for (i = 0; i < 4; i++) {
+		if (BP_RESULT_OK !=
+				bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+			DC_ERROR("Failed to find device tag!\n");
+			goto device_tag_fail;
+		}
+
+		/* Look for device tag that matches connector signal,
+		 * CRT for rgb, LCD for other supported signal types
+		 */
+		if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
+			&& link->public.connector_signal != SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
+			&& link->public.connector_signal == SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_WIRELESS
+			&& link->public.connector_signal != SIGNAL_TYPE_WIRELESS)
+			continue;
+		break;
+	}
+
+	if (bios->integrated_info)
+		info = *bios->integrated_info;
+
+	/* Look for channel mapping corresponding to connector and device tag */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+		struct external_display_path *path =
+			&info.ext_disp_conn_info.path[i];
+		if (path->device_connector_id.enum_id == link->link_id.enum_id
+			&& path->device_connector_id.id == link->link_id.id
+			&& path->device_connector_id.type == link->link_id.type
+			&& path->device_acpi_enum
+					== link->device_tag.acpi_device) {
+			link->ddi_channel_mapping = path->channel_mapping;
+			break;
+		}
+	}
+
+	/*
+	 * TODO check if GPIO programmed correctly
+	 *
+	 * If GPIO isn't programmed correctly HPD might not rise or drain
+	 * fast enough, leading to bounces.
+	 */
+	program_hpd_filter(link);
+
+	return true;
+device_tag_fail:
+	link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+	dal_ddc_service_destroy(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+	if (hpd_gpio != NULL)
+		dal_gpio_destroy_irq(&hpd_gpio);
+
+	return false;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+struct core_link *link_create(const struct link_init_data *init_params)
+{
+	struct core_link *link =
+			dm_alloc(sizeof(*link));
+
+	if (NULL == link)
+		goto alloc_fail;
+
+	if (false == construct(link, init_params))
+		goto construct_fail;
+
+	return link;
+
+construct_fail:
+	dm_free(link);
+
+alloc_fail:
+	return NULL;
+}
+
+void link_destroy(struct core_link **link)
+{
+	destruct(*link);
+	dm_free(*link);
+	*link = NULL;
+}
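
The create/construct split keeps allocation separate from fallible initialization. A sketch of the creation loop in the core, using only the link_init_data fields that construct() reads above (the dc->links bookkeeping shown is an assumption; the real resource code lives elsewhere in this series):

	/* Instantiate one core_link per VBIOS connector object i. */
	struct link_init_data init = { 0 };

	init.ctx = dc->ctx;
	init.dc = dc;
	init.connector_index = i;
	init.link_index = dc->link_count;

	link = link_create(&init);
	if (link != NULL)
		dc->links[dc->link_count++] = &link->public;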
+
+static void dpcd_configure_panel_mode(
+	struct core_link *link,
+	enum dp_panel_mode panel_mode)
+{
+	union dpcd_edp_config edp_config_set;
+	bool panel_mode_edp = false;
+
+	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+	if (DP_PANEL_MODE_DEFAULT != panel_mode) {
+
+		switch (panel_mode) {
+		case DP_PANEL_MODE_EDP:
+		case DP_PANEL_MODE_SPECIAL:
+			panel_mode_edp = true;
+			break;
+
+		default:
+			break;
+		}
+
+		/*set edp panel mode in receiver*/
+		core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_EDP_CONFIG_SET,
+			&edp_config_set.raw,
+			sizeof(edp_config_set.raw));
+
+		if (edp_config_set.bits.PANEL_MODE_EDP
+			!= panel_mode_edp) {
+			enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+			edp_config_set.bits.PANEL_MODE_EDP =
+			panel_mode_edp;
+			result = core_link_write_dpcd(
+				link,
+				DPCD_ADDRESS_EDP_CONFIG_SET,
+				&edp_config_set.raw,
+				sizeof(edp_config_set.raw));
+
+			ASSERT(result == DDC_RESULT_SUCESSFULL);
+		}
+	}
+	dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
+			"Link: %d eDP panel mode supported: %d "
+			"eDP panel mode enabled: %d \n",
+			link->public.link_index,
+			link->dpcd_caps.panel_mode_edp,
+			panel_mode_edp);
+}
+
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	struct core_link *link = stream->sink->link;
+	union down_spread_ctrl downspread;
+
+	core_link_read_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
+			&downspread.raw, sizeof(downspread));
+
+	downspread.bits.IGNORE_MSA_TIMING_PARAM =
+			(stream->public.ignore_msa_timing_param) ? 1 : 0;
+
+	core_link_write_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
+			&downspread.raw, sizeof(downspread));
+}
+
+static enum dc_status enable_link_dp(struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	enum dc_status status;
+	bool skip_video_pattern;
+	struct core_link *link = stream->sink->link;
+	struct dc_link_settings link_settings = {0};
+	enum dp_panel_mode panel_mode;
+	enum clocks_state cur_min_clock_state;
+	enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
+
+	/* get link settings for video mode timing */
+	decide_link_settings(stream, &link_settings);
+
+	/* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
+	 * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+	 */
+	if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+		max_link_rate = LINK_RATE_HIGH3;
+
+	if (link_settings.link_rate == max_link_rate) {
+		cur_min_clock_state = CLOCKS_STATE_INVALID;
+
+		if (dal_display_clock_get_min_clocks_state(
+				pipe_ctx->dis_clk, &cur_min_clock_state)) {
+			if (cur_min_clock_state < CLOCKS_STATE_NOMINAL)
+				dal_display_clock_set_min_clocks_state(
+						pipe_ctx->dis_clk,
+						CLOCKS_STATE_NOMINAL);
+		}
+	}
+
+	dp_enable_link_phy(
+		link,
+		pipe_ctx->stream->signal,
+		pipe_ctx->clock_source->id,
+		&link_settings);
+
+	panel_mode = dp_get_panel_mode(link);
+	dpcd_configure_panel_mode(link, panel_mode);
+
+	skip_video_pattern = true;
+
+	if (link_settings.link_rate == LINK_RATE_LOW)
+		skip_video_pattern = false;
+
+	if (perform_link_training_with_retries(
+			link,
+			&link_settings,
+			skip_video_pattern,
+			LINK_TRAINING_ATTEMPTS)) {
+		link->public.cur_link_settings = link_settings;
+		status = DC_OK;
+	} else
+		status = DC_ERROR_UNEXPECTED;
+
+	enable_stream_features(pipe_ctx);
+
+	return status;
+}
+
+static enum dc_status enable_link_dp_mst(struct pipe_ctx *pipe_ctx)
+{
+	struct core_link *link = pipe_ctx->stream->sink->link;
+
+	/* The sink signal type after an MST branch is MST. Multiple MST sinks
+	 * share one link; the link's DP PHY is enabled and trained only once.
+	 */
+	if (link->public.cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
+		return DC_OK;
+
+	return enable_link_dp(pipe_ctx);
+}
+
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	struct core_link *link = stream->sink->link;
+
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+		dal_ddc_service_write_scdc_data(
+			stream->sink->link->ddc,
+			stream->phy_pix_clk,
+			stream->public.timing.flags.LTE_340MCSC_SCRAMBLE);
+
+	memset(&stream->sink->link->public.cur_link_settings, 0,
+			sizeof(struct dc_link_settings));
+
+	link->link_enc->funcs->enable_tmds_output(
+			link->link_enc,
+			pipe_ctx->clock_source->id,
+			stream->public.timing.display_color_depth,
+			pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
+			pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+			stream->phy_pix_clk);
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		dal_ddc_service_read_scdc_data(link->ddc);
+}
+
+/****************************enable_link***********************************/
+static enum dc_status enable_link(struct pipe_ctx *pipe_ctx)
+{
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+	switch (pipe_ctx->stream->signal) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		status = enable_link_dp(pipe_ctx);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		status = enable_link_dp_mst(pipe_ctx);
+		msleep(200);
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		enable_link_hdmi(pipe_ctx);
+		status = DC_OK;
+		break;
+	case SIGNAL_TYPE_VIRTUAL:
+		status = DC_OK;
+		break;
+	default:
+		break;
+	}
+
+	if (pipe_ctx->audio && status == DC_OK) {
+		/* notify audio driver for audio modes of monitor */
+		pipe_ctx->audio->funcs->az_enable(pipe_ctx->audio);
+
+		/* un-mute audio */
+		/* TODO: audio should be per stream rather than per link */
+		pipe_ctx->stream_enc->funcs->audio_mute_control(
+			pipe_ctx->stream_enc, false);
+	}
+
+	return status;
+}
+
+static void disable_link(struct core_link *link, enum signal_type signal)
+{
+	/*
+	 * TODO: implement call for dp_set_hw_test_pattern
+	 * it is needed for compliance testing
+	 */
+
+	/* here we need to specify that encoder output settings
+	 * need to be calculated as for the set mode,
+	 * it will lead to querying dynamic link capabilities
+	 * which should be done before enable output */
+
+	if (dc_is_dp_signal(signal)) {
+		/* SST DP, eDP */
+		if (dc_is_dp_sst_signal(signal))
+			dp_disable_link_phy(link, signal);
+		else
+			dp_disable_link_phy_mst(link, signal);
+	} else
+		link->link_enc->funcs->disable_output(link->link_enc, signal);
+}
+
+enum dc_status dc_link_validate_mode_timing(
+		const struct core_stream *stream,
+		struct core_link *link,
+		const struct dc_crtc_timing *timing)
+{
+	uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+
+	/* A hack to avoid failing any modes for the EDID override feature on
+	 * topology changes such as a lower-quality DP cable or a different
+	 * dongle
+	 */
+	if (link->public.remote_sinks[0])
+		return DC_OK;
+
+	if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
+		return DC_EXCEED_DONGLE_MAX_CLK;
+
+	switch (stream->signal) {
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		if (!dp_validate_mode_timing(
+				link,
+				timing))
+			return DC_NO_DP_LINK_BANDWIDTH;
+		break;
+
+	default:
+		break;
+	}
+
+	return DC_OK;
+}
+
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+		uint32_t frame_ramp, const struct dc_stream *stream)
+{
+	struct core_link *link = DC_LINK_TO_CORE(dc_link);
+	struct dc_context *ctx = link->ctx;
+	struct core_dc *core_dc = DC_TO_CORE(ctx->dc);
+	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
+	unsigned int controller_id = 0;
+	int i;
+	uint32_t dmcu_status;
+
+	dm_logger_write(ctx->logger, LOG_BACKLIGHT,
+			"New Backlight level: %d (0x%X)\n", level, level);
+
+	dmcu_status = dm_read_reg(ctx, mmDMCU_STATUS);
+
+	/* If DMCU is in reset state, DMCU is uninitialized */
+	if (get_reg_field_value(dmcu_status, mmDMCU_STATUS, UC_IN_RESET)) {
+		link->link_enc->funcs->set_lcd_backlight_level(link->link_enc,
+						level);
+	} else {
+		for (i = 0; i < MAX_PIPES; i++) {
+			if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
+					== core_stream)
+				/* DMCU subtracts 1 from all controller id
+				 * values, therefore +1 here
+				 */
+				controller_id = core_dc->current_context->res_ctx.
+						pipe_ctx[i].tg->inst + 1;
+		}
+
+		link->link_enc->funcs->set_dmcu_backlight_level(
+				link->link_enc, level,
+				frame_ramp, controller_id);
+	}
+	return true;
+}
+
+bool dc_link_init_dmcu_backlight_settings(const struct dc_link *dc_link)
+{
+	struct core_link *link = DC_LINK_TO_CORE(dc_link);
+
+	if (link->link_enc->funcs->init_dmcu_backlight_settings != NULL)
+		link->link_enc->funcs->
+			init_dmcu_backlight_settings(link->link_enc);
+
+	return true;
+}
+
+bool dc_link_set_abm_level(const struct dc_link *dc_link, uint32_t level)
+{
+	struct core_link *link = DC_LINK_TO_CORE(dc_link);
+	struct dc_context *ctx = link->ctx;
+
+	dm_logger_write(ctx->logger, LOG_BACKLIGHT,
+			"New abm level: %d (0x%X)\n", level, level);
+
+	link->link_enc->funcs->set_dmcu_abm_level(link->link_enc, level);
+	return true;
+}
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable)
+{
+	struct core_link *link = DC_LINK_TO_CORE(dc_link);
+
+	if (dc_link != NULL && dc_link->psr_caps.psr_version > 0)
+		link->link_enc->funcs->set_dmcu_psr_enable(link->link_enc,
+								enable);
+	return true;
+}
+
+bool dc_link_setup_psr(const struct dc_link *dc_link,
+		const struct dc_stream *stream)
+{
+	struct core_link *link = DC_LINK_TO_CORE(dc_link);
+	struct dc_context *ctx = link->ctx;
+	struct core_dc *core_dc = DC_TO_CORE(ctx->dc);
+	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
+	struct psr_dmcu_context psr_context = {0};
+	int i;
+
+	psr_context.controllerId = CONTROLLER_ID_UNDEFINED;
+
+	if (dc_link != NULL && dc_link->psr_caps.psr_version > 0) {
+		/* updateSinkPsrDpcdConfig*/
+		union dpcd_psr_configuration psr_configuration;
+
+		memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+		psr_configuration.bits.ENABLE                    = 1;
+		psr_configuration.bits.CRC_VERIFICATION          = 1;
+		psr_configuration.bits.FRAME_CAPTURE_INDICATION  =
+			dc_link->psr_caps.psr_frame_capture_indication_req;
+
+		/* Check for PSR v2*/
+		if (dc_link->psr_caps.psr_version == 0x2) {
+			/* For PSR v2 selective update.
+			 * Indicates whether sink should start capturing
+			 * immediately following active scan line,
+			 * or starting with the 2nd active scan line.
+			 */
+			psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+			/*For PSR v2, determines whether Sink should generate
+			 * IRQ_HPD when CRC mismatch is detected.
+			 */
+			psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR    = 1;
+		}
+		dal_ddc_service_write_dpcd_data(
+					link->ddc,
+					368,
+					&psr_configuration.raw,
+					sizeof(psr_configuration.raw));
+
+		psr_context.channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+		if (psr_context.channel == 0)
+			psr_context.channel = 1;
+		psr_context.transmitterId = link->link_enc->transmitter;
+		psr_context.engineId = link->link_enc->preferred_engine;
+
+		for (i = 0; i < MAX_PIPES; i++) {
+			if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
+					== core_stream) {
+				/* DMCU subtracts 1 from all controller id
+				 * values, therefore +1 here
+				 */
+				psr_context.controllerId =
+					core_dc->current_context->res_ctx.
+					pipe_ctx[i].tg->inst + 1;
+				break;
+			}
+		}
+
+		/* Hardcoded for now.  Can be Pcie or Uniphy (or Unknown)*/
+		psr_context.phyType = PHY_TYPE_UNIPHY;
+		/*PhyId is associated with the transmitter id*/
+		psr_context.smuPhyId = link->link_enc->transmitter;
+
+		psr_context.crtcTimingVerticalTotal = stream->timing.v_total;
+		psr_context.vsyncRateHz = div64_u64(div64_u64((stream->
+						timing.pix_clk_khz * 1000),
+						stream->timing.v_total),
+						stream->timing.h_total);
+
+		psr_context.psrSupportedDisplayConfig =
+			(dc_link->psr_caps.psr_version > 0) ? true : false;
+		psr_context.psrExitLinkTrainingRequired =
+			dc_link->psr_caps.psr_exit_link_training_required;
+		psr_context.sdpTransmitLineNumDeadline =
+			dc_link->psr_caps.psr_sdp_transmit_line_num_deadline;
+		psr_context.psrFrameCaptureIndicationReq =
+			dc_link->psr_caps.psr_frame_capture_indication_req;
+
+		psr_context.skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+		psr_context.numberOfControllers =
+				link->dc->res_pool->res_cap->num_timing_generator;
+
+		psr_context.rfb_update_auto_en = true;
+
+		/* 2 frames before enter PSR. */
+		psr_context.timehyst_frames = 2;
+		/* half a frame
+		 * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+		 */
+		psr_context.hyst_lines = stream->timing.v_total / 2 / 100;
+		psr_context.aux_repeats = 10;
+
+		psr_context.psr_level.u32all = 0;
+
+		/* SMU will perform additional powerdown sequence.
+		 * For unsupported ASICs, set psr_level flag to skip PSR
+		 *  static screen notification to SMU.
+		 *  (Always set for DAL2, did not check ASIC)
+		 */
+		psr_context.psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
+
+		/* Controls additional delay after remote frame capture before
+		 * continuing power down, default = 0
+		 */
+		psr_context.frame_delay = 0;
+
+		link->link_enc->funcs->setup_dmcu_psr(
+			link->link_enc, &psr_context);
+		return true;
+	} else
+		return false;
+
+}
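
As a sanity check of the vsyncRateHz expression above: for CEA 1080p60 timing (pix_clk_khz = 148500, h_total = 2200, v_total = 1125), 148500 * 1000 / 1125 / 2200 = 60 Hz, as expected.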
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link)
+{
+	struct core_link *link = DC_LINK_TO_CORE(dc_link);
+
+	return &link->link_status;
+}
+
+void core_link_resume(struct core_link *link)
+{
+	if (link->public.connector_signal != SIGNAL_TYPE_VIRTUAL)
+		program_hpd_filter(link);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct core_stream *stream)
+{
+	struct dc_link_settings *link_settings =
+			&stream->sink->link->public.cur_link_settings;
+	uint32_t link_rate_in_mbps =
+			link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+	struct fixed31_32 mbps = dal_fixed31_32_from_int(
+			link_rate_in_mbps * link_settings->lane_count);
+
+	return dal_fixed31_32_div_int(mbps, 54);
+}
+
+static int get_color_depth(enum dc_color_depth color_depth)
+{
+	switch (color_depth) {
+	case COLOR_DEPTH_666: return 6;
+	case COLOR_DEPTH_888: return 8;
+	case COLOR_DEPTH_101010: return 10;
+	case COLOR_DEPTH_121212: return 12;
+	case COLOR_DEPTH_141414: return 14;
+	case COLOR_DEPTH_161616: return 16;
+	default: return 0;
+	}
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+	uint32_t bpc;
+	uint64_t kbps;
+	struct fixed31_32 peak_kbps;
+	uint32_t numerator;
+	uint32_t denominator;
+
+	bpc = get_color_depth(pipe_ctx->pix_clk_params.color_depth);
+	kbps = pipe_ctx->pix_clk_params.requested_pix_clk * bpc * 3;
+
+	/*
+	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+	 * common multiplier to render an integer PBN for all link rate/lane
+	 * count combinations
+	 * calculate
+	 * peak_kbps *= (1006/1000)
+	 * peak_kbps *= (64/54)
+	 * peak_kbps /= 8    convert to bytes
+	 */
+
+	numerator = 64 * PEAK_FACTOR_X1000;
+	denominator = 54 * 8 * 1000 * 1000;
+	kbps *= numerator;
+	peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+
+	return peak_kbps;
+}
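
Putting the two helpers together, a worked example: for 1080p60 at 24 bpp, kbps = 148500 * 8 * 3 = 3,564,000, so PBN = 3,564,000 * 64 * 1006 / (54 * 8 * 1000 * 1000) ≈ 531.2. An HBR2 x4 link (link-rate code 0x14 = 20) gives get_pbn_per_slot() = 20 * 27 * 4 / 54 = 40 PBN per time slot, so allocate_mst_payload() below ends up programming avg_time_slots_per_mtp ≈ 531.2 / 40 ≈ 13.3.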
+
+static void update_mst_stream_alloc_table(
+	struct core_link *link,
+	struct stream_encoder *stream_enc,
+	const struct dp_mst_stream_allocation_table *proposed_table)
+{
+	struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
+			{ 0 } };
+	struct link_mst_stream_allocation *dc_alloc;
+
+	int i;
+	int j;
+
+	/* DRM's proposed_table should add at most one new payload */
+	ASSERT(proposed_table->stream_count -
+			link->mst_stream_alloc_table.stream_count < 2);
+
+	/* copy proposed_table to core_link, add stream encoder */
+	for (i = 0; i < proposed_table->stream_count; i++) {
+
+		for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+			dc_alloc =
+			&link->mst_stream_alloc_table.stream_allocations[j];
+
+			if (dc_alloc->vcp_id ==
+				proposed_table->stream_allocations[i].vcp_id) {
+
+				work_table[i] = *dc_alloc;
+				break; /* exit j loop */
+			}
+		}
+
+		/* new vcp_id */
+		if (j == link->mst_stream_alloc_table.stream_count) {
+			work_table[i].vcp_id =
+				proposed_table->stream_allocations[i].vcp_id;
+			work_table[i].slot_count =
+				proposed_table->stream_allocations[i].slot_count;
+			work_table[i].stream_enc = stream_enc;
+		}
+	}
+
+	/* update link->mst_stream_alloc_table with work_table */
+	link->mst_stream_alloc_table.stream_count =
+			proposed_table->stream_count;
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+		link->mst_stream_alloc_table.stream_allocations[i] =
+				work_table[i];
+}
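
To make the merge concrete: if the link table currently holds {vcp_id 1, 13 slots, enc A} and DRM proposes {vcp_id 1, 13 slots} plus a new {vcp_id 2, 10 slots}, the inner loop copies the existing vcp 1 entry (keeping encoder A), the new-vcp_id branch creates the vcp 2 entry with the stream_enc passed in, stream_count becomes 2, and the remaining work_table entries stay zeroed.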
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	struct core_link *link = stream->sink->link;
+	struct link_encoder *link_encoder = link->link_enc;
+	struct stream_encoder *stream_encoder = pipe_ctx->stream_enc;
+	struct dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	struct fixed31_32 pbn;
+	struct fixed31_32 pbn_per_slot;
+	uint8_t i;
+
+	/* enable_link_dp_mst already checked link->enabled_stream_count
+	 * and that the stream is in link->stream[]. This is called during
+	 * set mode, so stream_enc is available.
+	 */
+
+	/* calculate the VC payload for the stream: stream_alloc */
+	if (dm_helpers_dp_mst_write_payload_allocation_table(
+		stream->ctx,
+		&stream->public,
+		&proposed_table,
+		true)) {
+		update_mst_stream_alloc_table(
+					link, pipe_ctx->stream_enc, &proposed_table);
+	} else
+		dm_logger_write(link->ctx->logger, LOG_WARNING,
+				"Failed to update "
+				"MST allocation table for "
+				"pipe idx:%d\n",
+				pipe_ctx->pipe_idx);
+
+	dm_logger_write(link->ctx->logger, LOG_MST,
+			"%s "
+			"stream_count: %d\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		dm_logger_write(link->ctx->logger, LOG_MST,
+		"stream_enc[%d]: 0x%x      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	ASSERT(proposed_table.stream_count > 0);
+
+	/* program DP source TX for payload */
+	link_encoder->funcs->update_mst_stream_allocation_table(
+		link_encoder,
+		&link->mst_stream_alloc_table);
+
+	/* send down message */
+	dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			&stream->public);
+
+	dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			&stream->public,
+			true);
+
+	/* slot X.Y for only current stream */
+	pbn_per_slot = get_pbn_per_slot(stream);
+	pbn = get_pbn_from_timing(pipe_ctx);
+	avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
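+	/*
+	 * Rough illustration: a 4-lane HBR2 link carries 2560 PBN in total,
+	 * i.e. 40 PBN per time slot, so a ~1063 PBN stream averages ~26.6
+	 * time slots per MTP.
+	 */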
+
+	stream_encoder->funcs->set_mst_bandwidth(
+		stream_encoder,
+		avg_time_slots_per_mtp);
+
+	return DC_OK;
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	struct core_link *link = stream->sink->link;
+	struct link_encoder *link_encoder = link->link_enc;
+	struct stream_encoder *stream_encoder = pipe_ctx->stream_enc;
+	struct dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+	uint8_t i;
+	bool mst_mode = (link->public.type == dc_connection_mst_branch);
+
+	/* deallocate_mst_payload is called before the link is disabled. On a
+	 * mode change or monitor disable/enable, a new stream is created that
+	 * is not yet in link->stream[]; its payload has not been allocated,
+	 * so no deallocation should be done for it. For a new mode set,
+	 * map_resources will get an engine for the new stream, so
+	 * stream_enc->id should still be valid at this point.
+	 */
+
+	/* slot X.Y */
+	stream_encoder->funcs->set_mst_bandwidth(
+		stream_encoder,
+		avg_time_slots_per_mtp);
+
+	/* TODO: which component is responsible for remove payload table? */
+	if (mst_mode) {
+		if (dm_helpers_dp_mst_write_payload_allocation_table(
+				stream->ctx,
+				&stream->public,
+				&proposed_table,
+				false)) {
+
+			update_mst_stream_alloc_table(
+				link, pipe_ctx->stream_enc, &proposed_table);
+		} else {
+			dm_logger_write(link->ctx->logger, LOG_WARNING,
+					"Failed to update "
+					"MST allocation table for "
+					"pipe idx:%d\n",
+					pipe_ctx->pipe_idx);
+		}
+	}
+
+	dm_logger_write(link->ctx->logger, LOG_MST,
+			"%s "
+			"stream_count: %d\n",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		dm_logger_write(link->ctx->logger, LOG_MST,
+		"stream_enc[%d]: 0x%x      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	link_encoder->funcs->update_mst_stream_allocation_table(
+		link_encoder,
+		&link->mst_stream_alloc_table);
+
+	if (mst_mode) {
+		dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			&stream->public);
+
+		dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			&stream->public,
+			false);
+	}
+
+	return DC_OK;
+}
+
+void core_link_enable_stream(struct pipe_ctx *pipe_ctx)
+{
+	struct core_dc *core_dc = DC_TO_CORE(pipe_ctx->stream->ctx->dc);
+
+	if (DC_OK != enable_link(pipe_ctx)) {
+			BREAK_TO_DEBUGGER();
+			return;
+	}
+
+	core_dc->hwss.enable_stream(pipe_ctx);
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		allocate_mst_payload(pipe_ctx);
+}
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
+{
+	struct core_dc *core_dc = DC_TO_CORE(pipe_ctx->stream->ctx->dc);
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		deallocate_mst_payload(pipe_ctx);
+
+	core_dc->hwss.disable_stream(pipe_ctx);
+
+	disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
+}
+

+ 1098 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c

@@ -0,0 +1,1098 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "gpio_service_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/grph_object_id.h"
+#include "include/dpcd_defs.h"
+#include "include/logger_interface.h"
+#include "include/vector.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+
+/* CV smart dongle slave address for retrieving supported HDTV modes*/
+#define CV_SMART_DONGLE_ADDRESS 0x20
+/* DVI-HDMI dongle slave address for retrieving dongle signature*/
+#define DVI_HDMI_DONGLE_ADDRESS 0x68
+static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
+struct dvi_hdmi_dongle_signature_data {
+	int8_t vendor[3];/* "AMD" */
+	uint8_t version[2];
+	uint8_t size;
+	int8_t id[11];/* "6140063500G"*/
+};
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+struct dp_hdmi_dongle_signature_data {
+	int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+	uint8_t eot;/* end of transmission '\x4' */
+};
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHZ*/
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHZ*/
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
+enum edid_read_result {
+	EDID_READ_RESULT_EDID_MATCH = 0,
+	EDID_READ_RESULT_EDID_MISMATCH,
+	EDID_READ_RESULT_CHECKSUM_READ_ERR,
+	EDID_READ_RESULT_VENDOR_READ_ERR
+};
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS  0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+union hdmi_scdc_update_read_data {
+	uint8_t byte[2];
+	struct {
+		uint8_t STATUS_UPDATE:1;
+		uint8_t CED_UPDATE:1;
+		uint8_t RR_TEST:1;
+		uint8_t RESERVED:5;
+		uint8_t RESERVED2:8;
+	} fields;
+};
+
+union hdmi_scdc_status_flags_data {
+	uint8_t byte[2];
+	struct {
+		uint8_t CLOCK_DETECTED:1;
+		uint8_t CH0_LOCKED:1;
+		uint8_t CH1_LOCKED:1;
+		uint8_t CH2_LOCKED:1;
+		uint8_t RESERVED:4;
+		uint8_t RESERVED2:8;
+	} fields;
+};
+
+union hdmi_scdc_ced_data {
+	uint8_t byte[7];
+	struct {
+		uint8_t CH0_8LOW:8;
+		uint8_t CH0_7HIGH:7;
+		uint8_t CH0_VALID:1;
+		uint8_t CH1_8LOW:8;
+		uint8_t CH1_7HIGH:7;
+		uint8_t CH1_VALID:1;
+		uint8_t CH2_8LOW:8;
+		uint8_t CH2_7HIGH:7;
+		uint8_t CH2_VALID:1;
+		uint8_t CHECKSUM:8;
+	} fields;
+};
+
+union hdmi_scdc_test_config_Data {
+	uint8_t byte;
+	struct {
+		uint8_t TEST_READ_REQUEST_DELAY:7;
+		uint8_t TEST_READ_REQUEST: 1;
+	} fields;
+};
+
+struct i2c_payloads {
+	struct vector payloads;
+};
+
+struct aux_payloads {
+	struct vector payloads;
+};
+
+struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+	struct i2c_payloads *payloads;
+
+	payloads = dm_alloc(sizeof(struct i2c_payloads));
+
+	if (!payloads)
+		return NULL;
+
+	if (dal_vector_construct(
+		&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+		return payloads;
+
+	dm_free(payloads);
+	return NULL;
+
+}
+
+struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+{
+	return (struct i2c_payload *)p->payloads.container;
+}
+
+uint32_t  dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+{
+	return p->payloads.count;
+}
+
+void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+{
+	if (!p || !*p)
+		return;
+	dal_vector_destruct(&(*p)->payloads);
+	dm_free(*p);
+	*p = NULL;
+
+}
+
+struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+	struct aux_payloads *payloads;
+
+	payloads = dm_alloc(sizeof(struct aux_payloads));
+
+	if (!payloads)
+		return NULL;
+
+	if (dal_vector_construct(
+		&payloads->payloads, ctx, count, sizeof(struct aux_payload)))
+		return payloads;
+
+	dm_free(payloads);
+	return NULL;
+}
+
+struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
+{
+	return (struct aux_payload *)p->payloads.container;
+}
+
+uint32_t  dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
+{
+	return p->payloads.count;
+}
+
+void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
+{
+	if (!p || !*p)
+		return;
+
+	dal_vector_destruct(&(*p)->payloads);
+	dm_free(*p);
+	*p = NULL;
+}
+
+#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+void dal_ddc_i2c_payloads_add(
+	struct i2c_payloads *payloads,
+	uint32_t address,
+	uint32_t len,
+	uint8_t *data,
+	bool write)
+{
+	uint32_t payload_size = EDID_SEGMENT_SIZE;
+	uint32_t pos;
+
+	for (pos = 0; pos < len; pos += payload_size) {
+		struct i2c_payload payload = {
+			.write = write,
+			.address = address,
+			.length = DDC_MIN(payload_size, len - pos),
+			.data = data + pos };
+		dal_vector_append(&payloads->payloads, &payload);
+	}
+
+}
+
+void dal_ddc_aux_payloads_add(
+	struct aux_payloads *payloads,
+	uint32_t address,
+	uint32_t len,
+	uint8_t *data,
+	bool write)
+{
+	uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
+	uint32_t pos;
+
+	for (pos = 0; pos < len; pos += payload_size) {
+		struct aux_payload payload = {
+			.i2c_over_aux = true,
+			.write = write,
+			.address = address,
+			.length = DDC_MIN(payload_size, len - pos),
+			.data = data + pos };
+		dal_vector_append(&payloads->payloads, &payload);
+	}
+}
+
+static bool construct(
+	struct ddc_service *ddc_service,
+	struct ddc_service_init_data *init_data)
+{
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(init_data->id);
+
+	struct gpio_service *gpio_service = init_data->ctx->gpio_service;
+	struct graphics_object_i2c_info i2c_info;
+	struct gpio_ddc_hw_info hw_info;
+	struct dc_bios *dcb = init_data->ctx->dc_bios;
+
+	ddc_service->link = init_data->link;
+	ddc_service->ctx = init_data->ctx;
+
+	if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
+		ddc_service->ddc_pin = NULL;
+	} else {
+		hw_info.ddc_channel = i2c_info.i2c_line;
+		hw_info.hw_supported = i2c_info.i2c_hw_assist;
+
+		ddc_service->ddc_pin = dal_gpio_create_ddc(
+			gpio_service,
+			i2c_info.gpio_info.clk_a_register_index,
+			1 << i2c_info.gpio_info.clk_a_shift,
+			&hw_info);
+	}
+
+	ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
+	ddc_service->flags.FORCE_READ_REPEATED_START = false;
+	ddc_service->flags.EDID_STRESS_READ = false;
+
+	ddc_service->flags.IS_INTERNAL_DISPLAY =
+		connector_id == CONNECTOR_ID_EDP ||
+		connector_id == CONNECTOR_ID_LVDS;
+
+	ddc_service->wa.raw = 0;
+	return true;
+}
+
+struct ddc_service *dal_ddc_service_create(
+	struct ddc_service_init_data *init_data)
+{
+	struct ddc_service *ddc_service;
+
+	ddc_service = dm_alloc(sizeof(struct ddc_service));
+
+	if (!ddc_service)
+		return NULL;
+
+	if (construct(ddc_service, init_data))
+		return ddc_service;
+
+	dm_free(ddc_service);
+	return NULL;
+}
+
+static void destruct(struct ddc_service *ddc)
+{
+	if (ddc->ddc_pin)
+		dal_gpio_destroy_ddc(&ddc->ddc_pin);
+}
+
+void dal_ddc_service_destroy(struct ddc_service **ddc)
+{
+	if (!ddc || !*ddc) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+	destruct(*ddc);
+	dm_free(*ddc);
+	*ddc = NULL;
+}
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
+{
+	return DDC_SERVICE_TYPE_CONNECTOR;
+}
+
+void dal_ddc_service_set_transaction_type(
+	struct ddc_service *ddc,
+	enum ddc_transaction_type type)
+{
+	ddc->transaction_type = type;
+}
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+{
+	switch (ddc->transaction_type) {
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+		enum display_dongle_type dongle_type)
+{
+	ddc->dongle_type = dongle_type;
+}
+
+static uint32_t defer_delay_converter_wa(
+	struct ddc_service *ddc,
+	uint32_t defer_delay)
+{
+	struct core_link *link = ddc->link;
+
+	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
+		!memcmp(link->dpcd_caps.branch_dev_name,
+			DP_DVI_CONVERTER_ID_4,
+			sizeof(link->dpcd_caps.branch_dev_name)))
+		return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
+			defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
+
+	return defer_delay;
+}
+
+#define DP_TRANSLATOR_DELAY 5
+
+static uint32_t get_defer_delay(struct ddc_service *ddc)
+{
+	uint32_t defer_delay = 0;
+
+	switch (ddc->transaction_type) {
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+		if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
+			(DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
+			(DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
+				ddc->dongle_type)) {
+
+			defer_delay = DP_TRANSLATOR_DELAY;
+
+			defer_delay =
+				defer_delay_converter_wa(ddc, defer_delay);
+
+		} else /*sink has a delay different from an Active Converter*/
+			defer_delay = 0;
+		break;
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+		defer_delay = DP_TRANSLATOR_DELAY;
+		break;
+	default:
+		break;
+	}
+	return defer_delay;
+}
+
+static bool i2c_read(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *buffer,
+	uint32_t len)
+{
+	uint8_t offs_data = 0;
+	struct i2c_payload payloads[2] = {
+		{
+		.write = true,
+		.address = address,
+		.length = 1,
+		.data = &offs_data },
+		{
+		.write = false,
+		.address = address,
+		.length = len,
+		.data = buffer } };
+
+	struct i2c_command command = {
+		.payloads = payloads,
+		.number_of_payloads = 2,
+		.engine = DDC_I2C_COMMAND_ENGINE,
+		.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+	return dm_helpers_submit_i2c(
+			ddc->ctx,
+			&ddc->link->public,
+			&command);
+}
+
+static uint8_t aux_read_edid_block(
+	struct ddc_service *ddc,
+	uint8_t address,
+	uint8_t index,
+	uint8_t *buf)
+{
+	struct aux_command cmd = {
+		.payloads = NULL,
+		.number_of_payloads = 0,
+		.defer_delay = get_defer_delay(ddc),
+		.max_defer_write_retry = 0 };
+
+	uint8_t retrieved = 0;
+	uint8_t base_offset =
+		(index % DDC_EDID_BLOCKS_PER_SEGMENT) * DDC_EDID_BLOCK_SIZE;
+	uint8_t segment = index / DDC_EDID_BLOCKS_PER_SEGMENT;
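+	/*
+	 * Example, assuming 2 blocks of 128 bytes per 256-byte EDID segment:
+	 * block index 3 maps to segment 1 at base offset 128.
+	 */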
+
+	for (retrieved = 0; retrieved < DDC_EDID_BLOCK_SIZE;
+		retrieved += DEFAULT_AUX_MAX_DATA_SIZE) {
+
+		uint8_t offset = base_offset + retrieved;
+
+		struct aux_payload payloads[3] = {
+			{
+			.i2c_over_aux = true,
+			.write = true,
+			.address = DDC_EDID_SEGMENT_ADDRESS,
+			.length = 1,
+			.data = &segment },
+			{
+			.i2c_over_aux = true,
+			.write = true,
+			.address = address,
+			.length = 1,
+			.data = &offset },
+			{
+			.i2c_over_aux = true,
+			.write = false,
+			.address = address,
+			.length = DEFAULT_AUX_MAX_DATA_SIZE,
+			.data = &buf[retrieved] } };
+
+		if (segment == 0) {
+			cmd.payloads = &payloads[1];
+			cmd.number_of_payloads = 2;
+		} else {
+			cmd.payloads = payloads;
+			cmd.number_of_payloads = 3;
+		}
+
+		if (!dal_i2caux_submit_aux_command(
+			ddc->ctx->i2caux,
+			ddc->ddc_pin,
+			&cmd))
+			/* cannot read, break*/
+			break;
+	}
+
+	/* Reset segment to 0. Needed by some panels */
+	if (0 != segment) {
+		struct aux_payload payloads[1] = { {
+			.i2c_over_aux = true,
+			.write = true,
+			.address = DDC_EDID_SEGMENT_ADDRESS,
+			.length = 1,
+			.data = &segment } };
+		bool result = false;
+
+		segment = 0;
+
+		cmd.number_of_payloads = ARRAY_SIZE(payloads);
+		cmd.payloads = payloads;
+
+		result = dal_i2caux_submit_aux_command(
+			ddc->ctx->i2caux,
+			ddc->ddc_pin,
+			&cmd);
+
+		if (false == result)
+			dm_logger_write(
+				ddc->ctx->logger, LOG_ERROR,
+				"%s: Writing of EDID Segment (0x30) failed!\n",
+				__func__);
+	}
+
+	return retrieved;
+}
+
+static uint8_t i2c_read_edid_block(
+	struct ddc_service *ddc,
+	uint8_t address,
+	uint8_t index,
+	uint8_t *buf)
+{
+	bool ret = false;
+	uint8_t offset = (index % DDC_EDID_BLOCKS_PER_SEGMENT) *
+		DDC_EDID_BLOCK_SIZE;
+	uint8_t segment = index / DDC_EDID_BLOCKS_PER_SEGMENT;
+
+	struct i2c_command cmd = {
+		.payloads = NULL,
+		.number_of_payloads = 0,
+		.engine = DDC_I2C_COMMAND_ENGINE,
+		.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+	struct i2c_payload payloads[3] = {
+		{
+		.write = true,
+		.address = DDC_EDID_SEGMENT_ADDRESS,
+		.length = 1,
+		.data = &segment },
+		{
+		.write = true,
+		.address = address,
+		.length = 1,
+		.data = &offset },
+		{
+		.write = false,
+		.address = address,
+		.length = DDC_EDID_BLOCK_SIZE,
+		.data = buf } };
+	/*
+	 * Some I2C engines don't handle stop/start between write-offset and
+	 * read-data commands properly. For those displays, we have to force
+	 * the newer E-DDC behavior of repeated-start, which can be enabled
+	 * by runtime parameter. Originally implemented for OnLive using an
+	 * NXP receiver chip.
+	 */
+
+	if (index == 0 && !ddc->flags.FORCE_READ_REPEATED_START) {
+		/* base block, use DDC2B, submit as 2 commands */
+		cmd.payloads = &payloads[1];
+		cmd.number_of_payloads = 1;
+
+		if (dm_helpers_submit_i2c(
+			ddc->ctx,
+			&ddc->link->public,
+			&cmd)) {
+
+			cmd.payloads = &payloads[2];
+			cmd.number_of_payloads = 1;
+
+			ret = dm_helpers_submit_i2c(
+					ddc->ctx,
+					&ddc->link->public,
+					&cmd);
+		}
+
+	} else {
+		/*
+		 * extension block use E-DDC, submit as 1 command
+		 * or if repeated-start is forced by runtime parameter
+		 */
+		if (segment != 0) {
+			/* include segment offset in command*/
+			cmd.payloads = payloads;
+			cmd.number_of_payloads = 3;
+		} else {
+			/* we are reading first segment,
+			 * segment offset is not required */
+			cmd.payloads = &payloads[1];
+			cmd.number_of_payloads = 2;
+		}
+
+		ret = dm_helpers_submit_i2c(
+				ddc->ctx,
+				&ddc->link->public,
+				&cmd);
+	}
+
+	return ret ? DDC_EDID_BLOCK_SIZE : 0;
+}
+
+static uint32_t query_edid_block(
+	struct ddc_service *ddc,
+	uint8_t address,
+	uint8_t index,
+	uint8_t *buf,
+	uint32_t size)
+{
+	uint32_t size_retrieved = 0;
+
+	if (size < DDC_EDID_BLOCK_SIZE)
+		return 0;
+
+	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+		size_retrieved =
+			aux_read_edid_block(ddc, address, index, buf);
+	} else {
+		size_retrieved =
+			i2c_read_edid_block(ddc, address, index, buf);
+	}
+
+	return size_retrieved;
+}
+
+#define DDC_DPCD_EDID_CHECKSUM_WRITE_ADDRESS 0x261
+#define DDC_TEST_ACK_ADDRESS 0x260
+#define DDC_DPCD_EDID_TEST_ACK 0x04
+#define DDC_DPCD_EDID_TEST_MASK 0x04
+#define DDC_DPCD_TEST_REQUEST_ADDRESS 0x218
+
+/* TODO: go through a DM callback here, like for DPCD */
+
+static void write_dp_edid_checksum(
+	struct ddc_service *ddc,
+	uint8_t checksum)
+{
+	uint8_t dpcd_data;
+
+	dal_ddc_service_read_dpcd_data(
+		ddc,
+		DDC_DPCD_TEST_REQUEST_ADDRESS,
+		&dpcd_data,
+		1);
+
+	if (dpcd_data & DDC_DPCD_EDID_TEST_MASK) {
+
+		dal_ddc_service_write_dpcd_data(
+			ddc,
+			DDC_DPCD_EDID_CHECKSUM_WRITE_ADDRESS,
+			&checksum,
+			1);
+
+		dpcd_data = DDC_DPCD_EDID_TEST_ACK;
+
+		dal_ddc_service_write_dpcd_data(
+			ddc,
+			DDC_TEST_ACK_ADDRESS,
+			&dpcd_data,
+			1);
+	}
+}
+
+uint32_t dal_ddc_service_edid_query(struct ddc_service *ddc)
+{
+	uint32_t bytes_read = 0;
+	uint32_t ext_cnt = 0;
+
+	uint8_t address;
+	uint32_t i;
+
+	for (address = DDC_EDID_ADDRESS_START;
+		address <= DDC_EDID_ADDRESS_END; ++address) {
+
+		bytes_read = query_edid_block(
+			ddc,
+			address,
+			0,
+			ddc->edid_buf,
+			sizeof(ddc->edid_buf) - bytes_read);
+
+		if (bytes_read != DDC_EDID_BLOCK_SIZE)
+			continue;
+
+		/* get the number of ext blocks*/
+		ext_cnt = ddc->edid_buf[DDC_EDID_EXT_COUNT_OFFSET];
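+		/* e.g. a two-block EDID 1.x reports 1 in the extension count
+		 * byte (offset 0x7E), so one extra 128-byte block is read
+		 * below */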
+
+		/* EDID 2.0: need to read 1 more block because an EDID 2.0
+		 * base structure is 256 bytes in size */
+		if (ddc->edid_buf[DDC_EDID_20_SIGNATURE_OFFSET] ==
+			DDC_EDID_20_SIGNATURE)
+				ext_cnt = 1;
+
+		for (i = 0; i < ext_cnt; i++) {
+			/* read additional ext blocks accordingly */
+			bytes_read += query_edid_block(
+					ddc,
+					address,
+					i+1,
+					&ddc->edid_buf[bytes_read],
+					sizeof(ddc->edid_buf) - bytes_read);
+		}
+
+		/*this is special code path for DP compliance*/
+		if (DDC_TRANSACTION_TYPE_I2C_OVER_AUX == ddc->transaction_type)
+			write_dp_edid_checksum(
+				ddc,
+				ddc->edid_buf[(ext_cnt * DDC_EDID_BLOCK_SIZE) +
+				DDC_EDID1X_CHECKSUM_OFFSET]);
+
+		/* remember the address we fetched the EDID from,
+		 * for later signature checks */
+		ddc->address = address;
+
+		break;/* already read edid, done*/
+	}
+
+	ddc->edid_buf_len = bytes_read;
+	return bytes_read;
+}
+
+uint32_t dal_ddc_service_get_edid_buf_len(struct ddc_service *ddc)
+{
+	return ddc->edid_buf_len;
+}
+
+void dal_ddc_service_get_edid_buf(struct ddc_service *ddc, uint8_t *edid_buf)
+{
+	memmove(edid_buf,
+			ddc->edid_buf, ddc->edid_buf_len);
+}
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+	struct ddc_service *ddc,
+	struct display_sink_capability *sink_cap)
+{
+	uint8_t i;
+	bool is_valid_hdmi_signature;
+	enum display_dongle_type *dongle = &sink_cap->dongle_type;
+	uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+	bool is_type2_dongle = false;
+	struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+	/* Assume we have no valid DP passive dongle connected */
+	*dongle = DISPLAY_DONGLE_NONE;
+	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+	/* Read DP-HDMI dongle I2C (no response interpreted as DP-DVI dongle)*/
+	if (!i2c_read(
+		ddc,
+		DP_HDMI_DONGLE_ADDRESS,
+		type2_dongle_buf,
+		sizeof(type2_dongle_buf))) {
+		*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+		sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+
+		CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+				"DP-DVI passive dongle %dMhz: ",
+				DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+		return;
+	}
+
+	/* Check if Type 2 dongle.*/
+	if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+		is_type2_dongle = true;
+
+	dongle_signature =
+		(struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+	is_valid_hdmi_signature = true;
+
+	/* Check EOT */
+	if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+		is_valid_hdmi_signature = false;
+	}
+
+	/* Check signature */
+	for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+		/* If it's not the right signature, invalidate it, but
+		 * tolerate a mismatch in the subversion byte (i == 3) */
+		if (dongle_signature->id[i] !=
+			dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+			if (is_type2_dongle) {
+				is_valid_hdmi_signature = false;
+				break;
+			}
+
+		}
+	}
+
+	if (is_type2_dongle) {
+		uint32_t max_tmds_clk =
+			type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
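+		/* The DPCD register reports the max TMDS clock in 2.5 MHz
+		 * units; x2 + x/2 scales it to MHz with integer math. */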
+		max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
+
+		if (0 == max_tmds_clk ||
+				max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+				max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+
+			CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+					sizeof(type2_dongle_buf),
+					"DP-DVI passive dongle %dMhz: ",
+					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+		} else {
+			if (is_valid_hdmi_signature) {
+				*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+				CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+						sizeof(type2_dongle_buf),
+						"Type 2 DP-HDMI passive dongle %dMhz: ",
+						max_tmds_clk);
+			} else {
+				*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+				CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+						sizeof(type2_dongle_buf),
+						"Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
+						max_tmds_clk);
+
+			}
+
+			/* Multiply by 1000 to convert to kHz. */
+			sink_cap->max_hdmi_pixel_clock =
+				max_tmds_clk * 1000;
+		}
+
+	} else {
+		if (is_valid_hdmi_signature) {
+			*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+			CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+					sizeof(type2_dongle_buf),
+					"Type 1 DP-HDMI passive dongle %dMhz: ",
+					sink_cap->max_hdmi_pixel_clock / 1000);
+		} else {
+			*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+			CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+					sizeof(type2_dongle_buf),
+					"Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
+					sink_cap->max_hdmi_pixel_clock / 1000);
+		}
+	}
+}
+
+enum {
+	DP_SINK_CAP_SIZE =
+		DPCD_ADDRESS_EDP_CONFIG_CAP - DPCD_ADDRESS_DPCD_REV + 1
+};
+
+bool dal_ddc_service_query_ddc_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *write_buf,
+	uint32_t write_size,
+	uint8_t *read_buf,
+	uint32_t read_size)
+{
+	bool ret;
+	uint32_t payload_size =
+		dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+			DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
+
+	uint32_t write_payloads =
+		(write_size + payload_size - 1) / payload_size;
+
+	uint32_t read_payloads =
+		(read_size + payload_size - 1) / payload_size;
+
+	uint32_t payloads_num = write_payloads + read_payloads;
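+	/*
+	 * e.g. assuming a 16-byte aux payload limit, a 4-byte write plus a
+	 * 40-byte read splits into 1 + 3 = 4 payloads here.
+	 */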
+
+	if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+		return false;
+
+	/* TODO: the payload data length for i2c and aux is a uint8_t,
+	 * but we want to read 256 bytes over i2c */
+	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+		struct aux_payloads *payloads =
+			dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
+
+		struct aux_command command = {
+			.payloads = dal_ddc_aux_payloads_get(payloads),
+			.number_of_payloads = 0,
+			.defer_delay = get_defer_delay(ddc),
+			.max_defer_write_retry = 0 };
+
+		dal_ddc_aux_payloads_add(
+			payloads, address, write_size, write_buf, true);
+
+		dal_ddc_aux_payloads_add(
+			payloads, address, read_size, read_buf, false);
+
+		command.number_of_payloads =
+			dal_ddc_aux_payloads_get_count(payloads);
+
+		ret = dal_i2caux_submit_aux_command(
+				ddc->ctx->i2caux,
+				ddc->ddc_pin,
+				&command);
+
+		dal_ddc_aux_payloads_destroy(&payloads);
+
+	} else {
+		struct i2c_payloads *payloads =
+			dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+
+		struct i2c_command command = {
+			.payloads = dal_ddc_i2c_payloads_get(payloads),
+			.number_of_payloads = 0,
+			.engine = DDC_I2C_COMMAND_ENGINE,
+			.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+		dal_ddc_i2c_payloads_add(
+			payloads, address, write_size, write_buf, true);
+
+		dal_ddc_i2c_payloads_add(
+			payloads, address, read_size, read_buf, false);
+
+		command.number_of_payloads =
+			dal_ddc_i2c_payloads_get_count(payloads);
+
+		ret = dm_helpers_submit_i2c(
+				ddc->ctx,
+				&ddc->link->public,
+				&command);
+
+		dal_ddc_i2c_payloads_destroy(&payloads);
+	}
+
+	return ret;
+}
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *data,
+	uint32_t len)
+{
+	struct aux_payload read_payload = {
+		.i2c_over_aux = false,
+		.write = false,
+		.address = address,
+		.length = len,
+		.data = data,
+	};
+	struct aux_command command = {
+		.payloads = &read_payload,
+		.number_of_payloads = 1,
+		.defer_delay = 0,
+		.max_defer_write_retry = 0,
+	};
+
+	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+		BREAK_TO_DEBUGGER();
+		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	}
+
+	if (dal_i2caux_submit_aux_command(
+		ddc->ctx->i2caux,
+		ddc->ddc_pin,
+		&command))
+		return DDC_RESULT_SUCESSFULL;
+
+	return DDC_RESULT_FAILED_OPERATION;
+}
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	const uint8_t *data,
+	uint32_t len)
+{
+	struct aux_payload write_payload = {
+		.i2c_over_aux = false,
+		.write = true,
+		.address = address,
+		.length = len,
+		.data = (uint8_t *)data,
+	};
+	struct aux_command command = {
+		.payloads = &write_payload,
+		.number_of_payloads = 1,
+		.defer_delay = 0,
+		.max_defer_write_retry = 0,
+	};
+
+	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+		BREAK_TO_DEBUGGER();
+		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	}
+
+	if (dal_i2caux_submit_aux_command(
+		ddc->ctx->i2caux,
+		ddc->ddc_pin,
+		&command))
+		return DDC_RESULT_SUCESSFULL;
+
+	return DDC_RESULT_FAILED_OPERATION;
+}
+
+/*test only function*/
+void dal_ddc_service_set_ddc_pin(
+	struct ddc_service *ddc_service,
+	struct ddc *ddc)
+{
+	ddc_service->ddc_pin = ddc;
+}
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+{
+	return ddc_service->ddc_pin;
+}
+
+void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+		uint32_t pix_clk,
+		bool lte_340_scramble)
+{
+	bool over_340_mhz = pix_clk > 340000;
+	uint8_t slave_address = HDMI_SCDC_ADDRESS;
+	uint8_t offset = HDMI_SCDC_SINK_VERSION;
+	uint8_t sink_version = 0;
+	uint8_t write_buffer[2] = {0};
+	/* lte_340_scramble: scrambling below 340 MHz, from the SCDC caps */
+
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+			sizeof(offset), &sink_version, sizeof(sink_version));
+	if (sink_version == 1) {
+		/*Source Version = 1*/
+		write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
+		write_buffer[1] = 1;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				write_buffer, sizeof(write_buffer), NULL, 0);
+		/*Read Request from SCDC caps*/
+	}
+	write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
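+	/* TMDS_Config: bit 0 enables scrambling, bit 1 selects the 1/40 TMDS
+	 * bit clock ratio; above 340 MHz both are required, hence value 3. */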
+
+	if (over_340_mhz)
+		write_buffer[1] = 3;
+	else if (lte_340_scramble)
+		write_buffer[1] = 1;
+	else
+		write_buffer[1] = 0;
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+			sizeof(write_buffer), NULL, 0);
+}
+
+void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+{
+	uint8_t slave_address = HDMI_SCDC_ADDRESS;
+	uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
+	uint8_t tmds_config = 0;
+
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+			sizeof(offset), &tmds_config, sizeof(tmds_config));
+	if (tmds_config & 0x1) {
+		union hdmi_scdc_status_flags_data status_data = { {0} };
+		uint8_t scramble_status = 0;
+
+		offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				&offset, sizeof(offset), &scramble_status,
+				sizeof(scramble_status));
+		offset = HDMI_SCDC_STATUS_FLAGS;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				&offset, sizeof(offset), status_data.byte,
+				sizeof(status_data.byte));
+	}
+}
+

+ 2462 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c

@@ -0,0 +1,2462 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_link_dp.h"
+#include "dm_helpers.h"
+
+#include "inc/core_types.h"
+#include "link_hwss.h"
+#include "dc_link_ddc.h"
+#include "core_status.h"
+#include "dpcd_defs.h"
+
+#include "core_dc.h"
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
+		PRE_EMPHASIS_LEVEL3,
+		PRE_EMPHASIS_LEVEL2,
+		PRE_EMPHASIS_LEVEL1,
+		PRE_EMPHASIS_DISABLED };
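+/* e.g. voltage swing level 0 allows up to pre-emphasis level 3, while the
+ * maximum swing level allows no pre-emphasis at all */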
+
+enum {
+	POST_LT_ADJ_REQ_LIMIT = 6,
+	POST_LT_ADJ_REQ_TIMEOUT = 200
+};
+
+enum {
+	LINK_TRAINING_MAX_RETRY_COUNT = 5,
+	/* to avoid an infinite loop wherein the receiver
+	 * keeps switching between different VS levels
+	 */
+	LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
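+/* Entries are tried in order: the link rate is reduced before the lane
+ * count is. */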
+static const struct dc_link_settings link_training_fallback_table[] = {
+/* 3240 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_HIGH3, LINK_SPREAD_DISABLED },
+/* 2160 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
+/* 1080 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
+/* 648 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_LOW, LINK_SPREAD_DISABLED },
+/* 1620 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_HIGH3, LINK_SPREAD_DISABLED },
+/* 1080 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
+/* 540 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
+/* 324 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_LOW, LINK_SPREAD_DISABLED },
+/* 810 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_HIGH3, LINK_SPREAD_DISABLED },
+/* 540 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
+/* 270 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
+/* 162 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED } };
+
+static void wait_for_training_aux_rd_interval(
+	struct core_link* link,
+	uint32_t default_wait_in_micro_secs)
+{
+	union training_aux_rd_interval training_rd_interval;
+
+	/* overwrite the delay if rev > 1.1*/
+	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+		/* DP 1.2 or later - retrieve delay through
+		 * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
+		core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL,
+			(uint8_t *)&training_rd_interval,
+			sizeof(training_rd_interval));
+
+		if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+			default_wait_in_micro_secs =
+				training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+	}
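+	/* e.g. a DPCD TRAINING_AUX_RD_INTERVAL value of 2 yields
+	 * 2 * 4000 us = an 8 ms wait */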
+
+	udelay(default_wait_in_micro_secs);
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n wait = %d\n",
+		__func__,
+		default_wait_in_micro_secs);
+}
+
+static void dpcd_set_training_pattern(
+	struct core_link* link,
+	union dpcd_training_pattern dpcd_pattern)
+{
+	core_link_write_dpcd(
+		link,
+		DPCD_ADDRESS_TRAINING_PATTERN_SET,
+		&dpcd_pattern.raw,
+		1);
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s\n %x pattern = %x\n",
+		__func__,
+		DPCD_ADDRESS_TRAINING_PATTERN_SET,
+		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_set_link_settings(
+	struct core_link* link,
+	const struct link_training_settings *lt_settings)
+{
+	uint8_t rate = (uint8_t)
+	(lt_settings->link_settings.link_rate);
+
+	union down_spread_ctrl downspread = {{0}};
+	union lane_count_set lane_count_set = {{0}};
+	uint8_t link_set_buffer[2];
+
+	downspread.raw = (uint8_t)
+	(lt_settings->link_settings.link_spread);
+
+	lane_count_set.bits.LANE_COUNT_SET =
+	lt_settings->link_settings.lane_count;
+
+	lane_count_set.bits.ENHANCED_FRAMING = 1;
+
+	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+		link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+
+	link_set_buffer[0] = rate;
+	link_set_buffer[1] = lane_count_set.raw;
+
+	core_link_write_dpcd(link, DPCD_ADDRESS_LINK_BW_SET,
+	link_set_buffer, 2);
+	core_link_write_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
+	&downspread.raw, sizeof(downspread));
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+		__func__,
+		DPCD_ADDRESS_LINK_BW_SET,
+		lt_settings->link_settings.link_rate,
+		DPCD_ADDRESS_LANE_COUNT_SET,
+		lt_settings->link_settings.lane_count,
+		DPCD_ADDRESS_DOWNSPREAD_CNTL,
+		lt_settings->link_settings.link_spread);
+
+}
+
+static enum dpcd_training_patterns
+	hw_training_pattern_to_dpcd_training_pattern(
+	struct core_link* link,
+	enum hw_dp_training_pattern pattern)
+{
+	enum dpcd_training_patterns dpcd_tr_pattern =
+	DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+	switch (pattern) {
+	case HW_DP_TRAINING_PATTERN_1:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+		break;
+	case HW_DP_TRAINING_PATTERN_2:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+		break;
+	case HW_DP_TRAINING_PATTERN_3:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+		break;
+	case HW_DP_TRAINING_PATTERN_4:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+		break;
+	default:
+		ASSERT(0);
+		dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+			"%s: Invalid HW Training pattern: %d\n",
+			__func__, pattern);
+		break;
+	}
+
+	return dpcd_tr_pattern;
+
+}
+
+static void dpcd_set_lt_pattern_and_lane_settings(
+	struct core_link* link,
+	const struct link_training_settings *lt_settings,
+	enum hw_dp_training_pattern pattern)
+{
+	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+	const uint32_t dpcd_base_lt_offset =
+	DPCD_ADDRESS_TRAINING_PATTERN_SET;
+	uint8_t dpcd_lt_buffer[5] = {0};
+	union dpcd_training_pattern dpcd_pattern = {{0}};
+	uint32_t lane;
+	uint32_t size_in_bytes;
+	bool edp_workaround = false; /* TODO link_prop.INTERNAL */
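+	/*
+	 * dpcd_lt_buffer mirrors DPCD 0x102 - 0x106: the training pattern
+	 * byte at offset 0, followed by up to four per-lane setting bytes.
+	 */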
+
+	/*****************************************************************
+	* DpcdAddress_TrainingPatternSet
+	*****************************************************************/
+	dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+		hw_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+	dpcd_lt_buffer[DPCD_ADDRESS_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+		= dpcd_pattern.raw;
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s\n %x pattern = %x\n",
+		__func__,
+		DPCD_ADDRESS_TRAINING_PATTERN_SET,
+		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+
+	/*****************************************************************
+	* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
+	*****************************************************************/
+	for (lane = 0; lane <
+		(uint32_t)(lt_settings->link_settings.lane_count); lane++) {
+
+		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+		(uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
+		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+		(uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
+
+		dpcd_lane[lane].bits.MAX_SWING_REACHED =
+		(lt_settings->lane_settings[lane].VOLTAGE_SWING ==
+		VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+		(lt_settings->lane_settings[lane].PRE_EMPHASIS ==
+		PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+	}
+
+	/* concatenate everything into one buffer */
+
+	size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+
+	/* buffer offset: DPCD_ADDRESS_LANE0_SET - dpcd_base_lt_offset
+	 * (0x00103 - 0x00102) */
+	memmove(
+		&dpcd_lt_buffer[DPCD_ADDRESS_LANE0_SET - dpcd_base_lt_offset],
+		dpcd_lane,
+		size_in_bytes);
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n %x VS set = %x  PE set = %x \
+		max VS Reached = %x  max PE Reached = %x\n",
+		__func__,
+		DPCD_ADDRESS_LANE0_SET,
+		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+		dpcd_lane[0].bits.MAX_SWING_REACHED,
+		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+	if (edp_workaround) {
+		/* for eDP write in 2 parts because the 5-byte burst is
+		 * causing issues on some eDP panels (EPR#366724)
+		 */
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_TRAINING_PATTERN_SET,
+			&dpcd_pattern.raw,
+			sizeof(dpcd_pattern.raw));
+
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_LANE0_SET,
+			(uint8_t *)(dpcd_lane),
+			size_in_bytes);
+	} else {
+		/* write it all in a (1 + number-of-lanes)-byte burst */
+		core_link_write_dpcd(
+			link,
+			dpcd_base_lt_offset,
+			dpcd_lt_buffer,
+			size_in_bytes + sizeof(dpcd_pattern.raw));
+	}
+
+	link->public.cur_lane_setting = lt_settings->lane_settings[0];
+}
+
+static bool is_cr_done(enum dc_lane_count ln_count,
+	union lane_status *dpcd_lane_status)
+{
+	bool done = true;
+	uint32_t lane;
+	/* are all LANEx_CR_DONE bits set? */
+	for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+		if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+			done = false;
+	}
+	return done;
+
+}
+
+static bool is_ch_eq_done(enum dc_lane_count ln_count,
+	union lane_status *dpcd_lane_status,
+	union lane_align_status_updated *lane_status_updated)
+{
+	bool done = true;
+	uint32_t lane;
+	if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
+		done = false;
+	else {
+		for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+			if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
+				!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+				done = false;
+		}
+	}
+	return done;
+
+}
+
+static void update_drive_settings(
+		struct link_training_settings *dest,
+		struct link_training_settings src)
+{
+	uint32_t lane;
+	for (lane = 0; lane < src.link_settings.lane_count; lane++) {
+		dest->lane_settings[lane].VOLTAGE_SWING =
+			src.lane_settings[lane].VOLTAGE_SWING;
+		dest->lane_settings[lane].PRE_EMPHASIS =
+			src.lane_settings[lane].PRE_EMPHASIS;
+		dest->lane_settings[lane].POST_CURSOR2 =
+			src.lane_settings[lane].POST_CURSOR2;
+	}
+}
+
+static uint8_t get_nibble_at_index(const uint8_t *buf,
+	uint32_t index)
+{
+	uint8_t nibble;
+	nibble = buf[index / 2];
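+	/* two lane nibbles are packed per byte; e.g. index 3 selects the
+	 * high nibble of buf[1] */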
+
+	if (index % 2)
+		nibble >>= 4;
+	else
+		nibble &= 0x0F;
+
+	return nibble;
+}
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+	enum dc_voltage_swing voltage)
+{
+	enum dc_pre_emphasis pre_emphasis;
+	pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+	if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+		pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+	return pre_emphasis;
+
+}
+
+static void find_max_drive_settings(
+	const struct link_training_settings *link_training_setting,
+	struct link_training_settings *max_lt_setting)
+{
+	uint32_t lane;
+	struct dc_lane_settings max_requested;
+
+	max_requested.VOLTAGE_SWING =
+		link_training_setting->
+		lane_settings[0].VOLTAGE_SWING;
+	max_requested.PRE_EMPHASIS =
+		link_training_setting->
+		lane_settings[0].PRE_EMPHASIS;
+	/*max_requested.postCursor2 =
+	 * link_training_setting->laneSettings[0].postCursor2;*/
+
+	/* Determine what the maximum of the requested settings are*/
+	for (lane = 1; lane < link_training_setting->link_settings.lane_count;
+			lane++) {
+		if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
+			max_requested.VOLTAGE_SWING)
+
+			max_requested.VOLTAGE_SWING =
+			link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING;
+
+		if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
+				max_requested.PRE_EMPHASIS)
+			max_requested.PRE_EMPHASIS =
+			link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS;
+
+		/*
+		if (link_training_setting->laneSettings[lane].postCursor2 >
+		 max_requested.postCursor2)
+		{
+		max_requested.postCursor2 =
+		link_training_setting->laneSettings[lane].postCursor2;
+		}
+		*/
+	}
+
+	/* make sure the requested settings are
+	 * not higher than maximum settings*/
+	if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+		max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+	if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+		max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+	/*
+	if (max_requested.postCursor2 > PostCursor2_MaxLevel)
+	max_requested.postCursor2 = PostCursor2_MaxLevel;
+	*/
+
+	/* make sure the pre-emphasis matches the voltage swing*/
+	if (max_requested.PRE_EMPHASIS >
+		get_max_pre_emphasis_for_voltage_swing(
+			max_requested.VOLTAGE_SWING))
+		max_requested.PRE_EMPHASIS =
+		get_max_pre_emphasis_for_voltage_swing(
+			max_requested.VOLTAGE_SWING);
+
+	/*
+	 * Post Cursor2 levels are completely independent from
+	 * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
+	 * can only be applied to each allowable combination of voltage
+	 * swing and pre-emphasis levels */
+	 /* if ( max_requested.postCursor2 >
+	  *  getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
+	  *  max_requested.postCursor2 =
+	  *  getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
+	  */
+
+	max_lt_setting->link_settings.link_rate =
+		link_training_setting->link_settings.link_rate;
+	max_lt_setting->link_settings.lane_count =
+	link_training_setting->link_settings.lane_count;
+	max_lt_setting->link_settings.link_spread =
+		link_training_setting->link_settings.link_spread;
+
+	for (lane = 0; lane <
+		link_training_setting->link_settings.lane_count;
+		lane++) {
+		max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
+			max_requested.VOLTAGE_SWING;
+		max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
+			max_requested.PRE_EMPHASIS;
+		/*max_lt_setting->laneSettings[lane].postCursor2 =
+		 * max_requested.postCursor2;
+		 */
+	}
+
+}
+
+static void get_lane_status_and_drive_settings(
+	struct core_link* link,
+	const struct link_training_settings *link_training_setting,
+	union lane_status *ln_status,
+	union lane_align_status_updated *ln_status_updated,
+	struct link_training_settings *req_settings)
+{
+	uint8_t dpcd_buf[6] = {0};
+	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+	struct link_training_settings request_settings = {{0}};
+	uint32_t lane;
+
+	memset(req_settings, '\0', sizeof(struct link_training_settings));
+
+	core_link_read_dpcd(
+		link,
+		DPCD_ADDRESS_LANE_01_STATUS,
+		(uint8_t *)(dpcd_buf),
+		sizeof(dpcd_buf));
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->link_settings.lane_count);
+		lane++) {
+
+		ln_status[lane].raw =
+			get_nibble_at_index(&dpcd_buf[0], lane);
+		dpcd_lane_adjust[lane].raw =
+			get_nibble_at_index(&dpcd_buf[4], lane);
+	}
+
+	ln_status_updated->raw = dpcd_buf[2];
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+		__func__,
+		DPCD_ADDRESS_LANE_01_STATUS, dpcd_buf[0],
+		DPCD_ADDRESS_LANE_23_STATUS, dpcd_buf[1]);
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+		__func__,
+		DPCD_ADDRESS_ADJUST_REQUEST_LANE0_1,
+		dpcd_buf[4],
+		DPCD_ADDRESS_ADJUST_REQUEST_LANE2_3,
+		dpcd_buf[5]);
+
+	/*copy to req_settings*/
+	request_settings.link_settings.lane_count =
+		link_training_setting->link_settings.lane_count;
+	request_settings.link_settings.link_rate =
+		link_training_setting->link_settings.link_rate;
+	request_settings.link_settings.link_spread =
+		link_training_setting->link_settings.link_spread;
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->link_settings.lane_count);
+		lane++) {
+
+		request_settings.lane_settings[lane].VOLTAGE_SWING =
+			(enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
+				VOLTAGE_SWING_LANE);
+		request_settings.lane_settings[lane].PRE_EMPHASIS =
+			(enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
+				PRE_EMPHASIS_LANE);
+	}
+
+	/* Note: for post cursor 2, read the adjusted post cursor 2 settings
+	 * from DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+	 * (not implemented yet) */
+
+	/* find the maximum of the requested settings across all lanes
+	 * and set this maximum for all lanes */
+	find_max_drive_settings(&request_settings, req_settings);
+
+	/* if post cursor 2 is needed in the future,
+	 * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+	 */
+
+}
+
+static void dpcd_set_lane_settings(
+	struct core_link* link,
+	const struct link_training_settings *link_training_setting)
+{
+	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+	uint32_t lane;
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->
+		link_settings.lane_count);
+		lane++) {
+		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+			(uint8_t)(link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING);
+		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+			(uint8_t)(link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS);
+		dpcd_lane[lane].bits.MAX_SWING_REACHED =
+			(link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING ==
+			VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+			(link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS ==
+			PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+	}
+
+	core_link_write_dpcd(link,
+		DPCD_ADDRESS_LANE0_SET,
+		(uint8_t *)(dpcd_lane),
+		link_training_setting->link_settings.lane_count);
+
+	/*
+	if (LTSettings.link.rate == LinkRate_High2)
+	{
+		DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
+		for ( uint32_t lane = 0;
+		lane < lane_count_DPMax; lane++)
+		{
+			dpcd_lane2[lane].bits.post_cursor2_set =
+			static_cast<unsigned char>(
+			LTSettings.laneSettings[lane].postCursor2);
+			dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
+		}
+		m_pDpcdAccessSrv->WriteDpcdData(
+		DpcdAddress_Lane0Set2,
+		reinterpret_cast<unsigned char*>(dpcd_lane2),
+		LTSettings.link.lanes);
+	}
+	*/
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s\n %x VS set = %x  PE set = %x \
+		max VS Reached = %x  max PE Reached = %x\n",
+		__func__,
+		DPCD_ADDRESS_LANE0_SET,
+		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+		dpcd_lane[0].bits.MAX_SWING_REACHED,
+		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+	link->public.cur_lane_setting = link_training_setting->lane_settings[0];
+
+}
+
+static bool is_max_vs_reached(
+	const struct link_training_settings *lt_settings)
+{
+	uint32_t lane;
+	for (lane = 0; lane <
+		(uint32_t)(lt_settings->link_settings.lane_count);
+		lane++) {
+		if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+			== VOLTAGE_SWING_MAX_LEVEL)
+			return true;
+	}
+	return false;
+
+}
+
+void dc_link_dp_set_drive_settings(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings)
+{
+	struct core_link *core_link = DC_LINK_TO_CORE(link);
+	/* program ASIC PHY settings*/
+	dp_set_hw_lane_settings(core_link, lt_settings);
+
+	/* Notify DP sink the PHY settings from source */
+	dpcd_set_lane_settings(core_link, lt_settings);
+}
+
+static bool perform_post_lt_adj_req_sequence(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	enum dc_lane_count lane_count =
+	lt_settings->link_settings.lane_count;
+
+	uint32_t adj_req_count;
+	uint32_t adj_req_timer;
+	bool req_drv_setting_changed;
+	uint32_t lane;
+
+	req_drv_setting_changed = false;
+	for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+	adj_req_count++) {
+
+		req_drv_setting_changed = false;
+
+		for (adj_req_timer = 0;
+			adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+			adj_req_timer++) {
+
+			struct link_training_settings req_settings;
+			union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+			union lane_align_status_updated
+				dpcd_lane_status_updated;
+
+			get_lane_status_and_drive_settings(
+			link,
+			lt_settings,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated,
+			&req_settings);
+
+			if (dpcd_lane_status_updated.bits.
+					POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+				return true;
+
+			if (!is_cr_done(lane_count, dpcd_lane_status))
+				return false;
+
+			if (!is_ch_eq_done(
+				lane_count,
+				dpcd_lane_status,
+				&dpcd_lane_status_updated))
+				return false;
+
+			for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+				if (lt_settings->
+				lane_settings[lane].VOLTAGE_SWING !=
+				req_settings.lane_settings[lane].
+				VOLTAGE_SWING ||
+				lt_settings->lane_settings[lane].PRE_EMPHASIS !=
+				req_settings.lane_settings[lane].PRE_EMPHASIS) {
+
+					req_drv_setting_changed = true;
+					break;
+				}
+			}
+
+			if (req_drv_setting_changed) {
+				update_drive_settings(
+					lt_settings, req_settings);
+
+				dc_link_dp_set_drive_settings(&link->public,
+						lt_settings);
+				break;
+			}
+
+			msleep(1);
+		}
+
+		if (!req_drv_setting_changed) {
+			dm_logger_write(link->ctx->logger, LOG_WARNING,
+				"%s: Post Link Training Adjust Request Timed out\n",
+				__func__);
+
+			ASSERT(0);
+			return true;
+		}
+	}
+	dm_logger_write(link->ctx->logger, LOG_WARNING,
+		"%s: Post Link Training Adjust Request limit reached\n",
+		__func__);
+
+	ASSERT(0);
+	return true;
+
+}
+
+static enum hw_dp_training_pattern get_supported_tp(struct core_link *link)
+{
+	enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+	struct encoder_feature_support *features = &link->link_enc->features;
+	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+	if (features->flags.bits.IS_TPS3_CAPABLE)
+		highest_tp = HW_DP_TRAINING_PATTERN_3;
+
+	if (features->flags.bits.IS_TPS4_CAPABLE)
+		highest_tp = HW_DP_TRAINING_PATTERN_4;
+
+	if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
+		highest_tp >= HW_DP_TRAINING_PATTERN_4)
+		return HW_DP_TRAINING_PATTERN_4;
+
+	if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
+		highest_tp >= HW_DP_TRAINING_PATTERN_3)
+		return HW_DP_TRAINING_PATTERN_3;
+
+	return HW_DP_TRAINING_PATTERN_2;
+}
+
+static bool perform_channel_equalization_sequence(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	struct link_training_settings req_settings;
+	enum hw_dp_training_pattern hw_tr_pattern;
+	uint32_t retries_ch_eq;
+	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+	union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+
+	hw_tr_pattern = get_supported_tp(link);
+
+	dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+		retries_ch_eq++) {
+
+		dp_set_hw_lane_settings(link, lt_settings);
+
+		/* 2. update DPCD*/
+		if (!retries_ch_eq)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only for the 1-st iteration*/
+			dpcd_set_lt_pattern_and_lane_settings(
+				link,
+				lt_settings,
+				hw_tr_pattern);
+		else
+			dpcd_set_lane_settings(link, lt_settings);
+
+		/* 3. wait for receiver to lock-on*/
+		wait_for_training_aux_rd_interval(link, 400);
+
+		/* 4. Read lane status and requested
+		 * drive settings as set by the sink*/
+
+		get_lane_status_and_drive_settings(
+			link,
+			lt_settings,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated,
+			&req_settings);
+
+		/* 5. check CR done*/
+		if (!is_cr_done(lane_count, dpcd_lane_status))
+			return false;
+
+		/* 6. check CHEQ done*/
+		if (is_ch_eq_done(lane_count,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated))
+			return true;
+
+		/* 7. update VS/PE/PC2 in lt_settings*/
+		update_drive_settings(lt_settings, req_settings);
+	}
+
+	return false;
+
+}
+
+static bool perform_clock_recovery_sequence(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	uint32_t retries_cr;
+	uint32_t retry_count;
+	uint32_t lane;
+	struct link_training_settings req_settings;
+	enum dc_lane_count lane_count =
+	lt_settings->link_settings.lane_count;
+	enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+	union lane_align_status_updated dpcd_lane_status_updated;
+
+	retries_cr = 0;
+	retry_count = 0;
+	/* initial drive setting (VS/PE/PC2)*/
+	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+		lt_settings->lane_settings[lane].VOLTAGE_SWING =
+		VOLTAGE_SWING_LEVEL0;
+		lt_settings->lane_settings[lane].PRE_EMPHASIS =
+		PRE_EMPHASIS_DISABLED;
+		lt_settings->lane_settings[lane].POST_CURSOR2 =
+		POST_CURSOR2_DISABLED;
+	}
+
+	dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	/* The Synaptics MST hub can put link training into an infinite loop
+	 * by continuously switching the VS between level 0 and level 1; here
+	 * we try for CR lock for up to LINK_TRAINING_MAX_CR_RETRY attempts.
+	 */
+	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+	(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+		memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+		memset(&dpcd_lane_status_updated, '\0',
+		sizeof(dpcd_lane_status_updated));
+
+		/* 1. call HWSS to set lane settings*/
+		dp_set_hw_lane_settings(
+				link,
+				lt_settings);
+
+		/* 2. update DPCD of the receiver*/
+		if (!retries_cr)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only for the 1-st iteration.*/
+			dpcd_set_lt_pattern_and_lane_settings(
+					link,
+					lt_settings,
+					hw_tr_pattern);
+		else
+			dpcd_set_lane_settings(
+					link,
+					lt_settings);
+
+		/* 3. wait for receiver to lock on */
+		wait_for_training_aux_rd_interval(
+				link,
+				100);
+
+		/* 4. Read lane status and requested drive
+		* settings as set by the sink
+		*/
+		get_lane_status_and_drive_settings(
+				link,
+				lt_settings,
+				dpcd_lane_status,
+				&dpcd_lane_status_updated,
+				&req_settings);
+
+		/* 5. check CR done*/
+		if (is_cr_done(lane_count, dpcd_lane_status))
+			return true;
+
+		/* 6. max VS reached*/
+		if (is_max_vs_reached(lt_settings))
+			return false;
+
+		/* 7. same voltage*/
+		/* Note: VS same for all lanes,
+		* so comparing first lane is sufficient*/
+		if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+			req_settings.lane_settings[0].VOLTAGE_SWING)
+			retries_cr++;
+		else
+			retries_cr = 0;
+
+		/* 8. update VS/PE/PC2 in lt_settings*/
+		update_drive_settings(lt_settings, req_settings);
+
+		retry_count++;
+	}
+
+	if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+		ASSERT(0);
+		dm_logger_write(link->ctx->logger, LOG_ERROR,
+			"%s: Link Training Error, could not get CR after %d tries. "
+			"Possibly a voltage swing issue", __func__,
+			LINK_TRAINING_MAX_CR_RETRY);
+	}
+
+	return false;
+}
+
+static inline bool perform_link_training_int(
+	struct core_link *link,
+	struct link_training_settings *lt_settings,
+	bool status)
+{
+	union lane_count_set lane_count_set = { {0} };
+	union dpcd_training_pattern dpcd_pattern = { {0} };
+
+	/* 3. set training not in progress*/
+	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+	dpcd_set_training_pattern(link, dpcd_pattern);
+
+	/* 4. mainlink output idle pattern*/
+	dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+	/*
+	 * 5. post training adjust if required
+	 * If the upstream DPTX and downstream DPRX both support TPS4,
+	 * TPS4 must be used instead of POST_LT_ADJ_REQ.
+	 */
+	if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 &&
+		get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
+		return status;
+
+	if (status &&
+		perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+		status = false;
+
+	lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+	lane_count_set.bits.ENHANCED_FRAMING = 1;
+	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+	core_link_write_dpcd(
+		link,
+		DPCD_ADDRESS_LANE_COUNT_SET,
+		&lane_count_set.raw,
+		sizeof(lane_count_set));
+
+	return status;
+}
+
+bool dc_link_dp_perform_link_training(
+	struct dc_link *link,
+	const struct dc_link_settings *link_setting,
+	bool skip_video_pattern)
+{
+	struct core_link *core_link = DC_LINK_TO_CORE(link);
+	bool status;
+
+	char *link_rate = "Unknown";
+	struct link_training_settings lt_settings;
+
+	status = false;
+	memset(&lt_settings, '\0', sizeof(lt_settings));
+
+	lt_settings.link_settings.link_rate = link_setting->link_rate;
+	lt_settings.link_settings.lane_count = link_setting->lane_count;
+
+	/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
+
+	/* TODO hard coded to SS for now
+	 * lt_settings.link_settings.link_spread =
+	 * dal_display_path_is_ss_supported(
+	 * path_mode->display_path) ?
+	 * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+	 * LINK_SPREAD_DISABLED;
+	 */
+	lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+	/* 1. set link rate, lane count and spread*/
+	dpcd_set_link_settings(core_link, &lt_settings);
+
+	/* 2. perform link training (this also sets
+	 * link-training-done to false) */
+	if (perform_clock_recovery_sequence(core_link, &lt_settings)) {
+
+		if (perform_channel_equalization_sequence(core_link,
+				&lt_settings))
+			status = true;
+	}
+
+	if (status || !skip_video_pattern)
+		status = perform_link_training_int(core_link,
+				&lt_settings, status);
+
+	/* 6. print status message*/
+	switch (lt_settings.link_settings.link_rate) {
+
+	case LINK_RATE_LOW:
+		link_rate = "RBR";
+		break;
+	case LINK_RATE_HIGH:
+		link_rate = "HBR";
+		break;
+	case LINK_RATE_HIGH2:
+		link_rate = "HBR2";
+		break;
+	case LINK_RATE_RBR2:
+		link_rate = "RBR2";
+		break;
+	case LINK_RATE_HIGH3:
+		link_rate = "HBR3";
+		break;
+	default:
+		break;
+	}
+
+	/* Connectivity log: link training */
+	CONN_MSG_LT(core_link, "%sx%d %s VS=%d, PE=%d",
+			link_rate,
+			lt_settings.link_settings.lane_count,
+			status ? "pass" : "fail",
+			lt_settings.lane_settings[0].VOLTAGE_SWING,
+			lt_settings.lane_settings[0].PRE_EMPHASIS);
+
+	return status;
+}
+
+bool perform_link_training_with_retries(
+	struct core_link *link,
+	const struct dc_link_settings *link_setting,
+	bool skip_video_pattern,
+	int attempts)
+{
+	uint8_t j;
+	uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+
+	for (j = 0; j < attempts; ++j) {
+
+		if (dc_link_dp_perform_link_training(
+				&link->public,
+				link_setting,
+				skip_video_pattern))
+			return true;
+
+		msleep(delay_between_attempts);
+		delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+	}
+
+	return false;
+}
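+/* A worked example of the retry pacing above (illustrative, assuming
+ * attempts = 4): the sleep grows linearly with each failed attempt, so the
+ * delays between tries are 1x, 2x, 3x and 4x LINK_TRAINING_RETRY_DELAY;
+ * a sleep also follows the final failure before false is returned.
+ */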
+
+/* TODO: add more checks to see if the link supports the requested link configuration */
+static bool is_link_setting_supported(
+	const struct dc_link_settings *link_setting,
+	const struct dc_link_settings *max_link_setting)
+{
+	if (link_setting->lane_count > max_link_setting->lane_count ||
+		link_setting->link_rate > max_link_setting->link_rate)
+		return false;
+	return true;
+}
+
+static uint32_t get_link_training_fallback_table_len(
+	struct core_link *link)
+{
+	return ARRAY_SIZE(link_training_fallback_table);
+}
+
+static const struct dc_link_settings *get_link_training_fallback_table(
+	struct core_link *link, uint32_t i)
+{
+	return &link_training_fallback_table[i];
+}
+
+static bool exceeded_limit_link_setting(
+	const struct dc_link_settings *link_setting,
+	const struct dc_link_settings *limit_link_setting)
+{
+	return link_setting->lane_count * link_setting->link_rate >
+		limit_link_setting->lane_count * limit_link_setting->link_rate;
+}
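+/* Illustrative arithmetic for the limit check above, assuming the link_rate
+ * enum values mirror the DPCD link-rate codes (HBR = 0x0A, HBR2 = 0x14):
+ * a 4-lane HBR limit scores 4 * 0x0A = 40, and a 2-lane HBR2 candidate
+ * scores 2 * 0x14 = 40, so the candidate does not exceed that limit.
+ */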
+
+static struct dc_link_settings get_max_link_cap(struct core_link *link)
+{
+	/* Set Default link settings */
+	struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+			LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+
+	/* Higher link settings based on feature supported */
+	if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+	if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+	/* Lower link settings based on sink's link cap */
+	if (link->public.reported_link_cap.lane_count < max_link_cap.lane_count)
+		max_link_cap.lane_count =
+				link->public.reported_link_cap.lane_count;
+	if (link->public.reported_link_cap.link_rate < max_link_cap.link_rate)
+		max_link_cap.link_rate =
+				link->public.reported_link_cap.link_rate;
+	if (link->public.reported_link_cap.link_spread <
+			max_link_cap.link_spread)
+		max_link_cap.link_spread =
+				link->public.reported_link_cap.link_spread;
+	return max_link_cap;
+}
+
+bool dp_hbr_verify_link_cap(
+	struct core_link *link,
+	struct dc_link_settings *known_limit_link_setting)
+{
+	struct dc_link_settings max_link_cap = {0};
+	bool success;
+	bool skip_link_training;
+	const struct dc_link_settings *cur;
+	bool skip_video_pattern;
+	uint32_t i;
+	struct clock_source *dp_cs;
+	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+
+	success = false;
+	skip_link_training = false;
+
+	max_link_cap = get_max_link_cap(link);
+
+	/* TODO implement override and monitor patch later */
+
+	/* try to train the link from high to low to
+	 * find the physical link capability
+	 */
+	/* disable the PHY (it may have been enabled by the BIOS);
+	 * the driver will enable it itself later */
+	dp_disable_link_phy(link, link->public.connector_signal);
+
+	dp_cs = link->dc->res_pool->dp_clock_source;
+
+	if (dp_cs) {
+		dp_cs_id = dp_cs->id;
+	} else {
+		/*
+		 * The DP clock source is not initialized for some reason.
+		 * This should not happen; CLOCK_SOURCE_ID_EXTERNAL will be
+		 * used as a fallback.
+		 */
+		ASSERT(dp_cs);
+	}
+
+	for (i = 0; i < get_link_training_fallback_table_len(link) &&
+		!success; i++) {
+		cur = get_link_training_fallback_table(link, i);
+
+		if (known_limit_link_setting->lane_count != LANE_COUNT_UNKNOWN &&
+			exceeded_limit_link_setting(cur,
+					known_limit_link_setting))
+			continue;
+
+		if (!is_link_setting_supported(cur, &max_link_cap))
+			continue;
+
+		skip_video_pattern = true;
+		if (cur->link_rate == LINK_RATE_LOW)
+			skip_video_pattern = false;
+
+		dp_enable_link_phy(
+				link,
+				link->public.connector_signal,
+				dp_cs_id,
+				cur);
+
+		if (skip_link_training)
+			success = true;
+		else {
+			success = dc_link_dp_perform_link_training(
+							&link->public,
+							cur,
+							skip_video_pattern);
+		}
+
+		if (success)
+			link->public.verified_link_cap = *cur;
+
+		/* always disable the link before trying another
+		 * setting or before returning; we'll enable it later
+		 * based on the actual mode we're driving
+		 */
+		dp_disable_link_phy(link, link->public.connector_signal);
+	}
+
+	/* Link Training failed for all Link Settings
+	 *  (Lane Count is still unknown)
+	 */
+	if (!success) {
+		/* If all LT fails for all settings,
+		 * set verified = failed safe (1 lane low)
+		 */
+		link->public.verified_link_cap.lane_count = LANE_COUNT_ONE;
+		link->public.verified_link_cap.link_rate = LINK_RATE_LOW;
+
+		link->public.verified_link_cap.link_spread =
+		LINK_SPREAD_DISABLED;
+	}
+
+	link->public.max_link_setting = link->public.verified_link_cap;
+
+	return success;
+}
+
+static uint32_t bandwidth_in_kbps_from_timing(
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t bits_per_channel = 0;
+	uint32_t kbps;
+
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		bits_per_channel = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bits_per_channel = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bits_per_channel = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bits_per_channel = 12;
+		break;
+	case COLOR_DEPTH_141414:
+		bits_per_channel = 14;
+		break;
+	case COLOR_DEPTH_161616:
+		bits_per_channel = 16;
+		break;
+	default:
+		break;
+	}
+	ASSERT(bits_per_channel != 0);
+
+	kbps = timing->pix_clk_khz;
+	kbps *= bits_per_channel;
+
+	if (timing->flags.Y_ONLY != 1)
+		/* only Y-only transport cuts bandwidth to 1/3 of RGB;
+		 * everything else carries three color channels */
+		kbps *= 3;
+
+	return kbps;
+}
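+/* Worked example for the function above (illustrative): a 1080p60 RGB
+ * stream at 8 bpc has pix_clk_khz = 148500, so the required bandwidth is
+ * 148500 * 8 * 3 = 3564000 kbps, roughly 3.56 Gbps.
+ */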
+
+static uint32_t bandwidth_in_kbps_from_link_settings(
+	const struct dc_link_settings *link_setting)
+{
+	uint32_t link_rate_in_kbps = link_setting->link_rate *
+		LINK_RATE_REF_FREQ_IN_KHZ;
+
+	uint32_t lane_count = link_setting->lane_count;
+	uint32_t kbps = link_rate_in_kbps;
+
+	kbps *= lane_count;
+	kbps *= 8;   /* 8 bits per byte */
+
+	return kbps;
+}
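+/* Worked example (illustrative, assuming LINK_RATE_REF_FREQ_IN_KHZ is
+ * 27000): HBR2 has link-rate code 0x14 (20), so each lane runs at
+ * 20 * 27000 = 540000 ksymbols/s and carries 8 data bits per 8b/10b symbol.
+ * A 4-lane link therefore provides 540000 * 4 * 8 = 17280000 kbps, the
+ * familiar 17.28 Gbps payload rate of 4-lane HBR2.
+ */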
+
+bool dp_validate_mode_timing(
+	struct core_link *link,
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t req_bw;
+	uint32_t max_bw;
+
+	const struct dc_link_settings *link_setting;
+
+	/* 640x480 @ 25.175 MHz is the DP fail-safe mode; always allow it */
+	if (timing->pix_clk_khz == (uint32_t)25175 &&
+		timing->h_addressable == (uint32_t)640 &&
+		timing->v_addressable == (uint32_t)480)
+		return true;
+
+	/* We always use verified link settings */
+	link_setting = &link->public.verified_link_cap;
+
+	/* TODO: DYNAMIC_VALIDATION needs to be implemented */
+	/*if (flags.DYNAMIC_VALIDATION == 1 &&
+		link->public.verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+		link_setting = &link->public.verified_link_cap;
+	*/
+
+	req_bw = bandwidth_in_kbps_from_timing(timing);
+	max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+	if (req_bw <= max_bw) {
+		/* remember the biggest mode here; during initial link
+		 * training (to get verified_link_cap), LS sends an event
+		 * to the upper layer when it cannot train at the reported
+		 * cap, and the upper layer re-enumerates modes. This is
+		 * not necessary if the lower verified_link_cap is enough
+		 * to drive all the modes */
+
+		/* TODO: DYNAMIC_VALIDATION needs to be implemented */
+		/* if (flags.DYNAMIC_VALIDATION == 1)
+			dpsst->max_req_bw_for_verified_linkcap = dal_max(
+				dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+		return true;
+	}
+
+	return false;
+}
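+/* Tying the two worked examples above together (illustrative): the 1080p60
+ * mode needs ~3.56 Gbps while a 4-lane HBR2 link provides ~17.28 Gbps, so
+ * dp_validate_mode_timing() accepts that mode with plenty of headroom.
+ */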
+
+void decide_link_settings(struct core_stream *stream,
+	struct dc_link_settings *link_setting)
+{
+	const struct dc_link_settings *cur_ls;
+	struct core_link *link;
+	uint32_t req_bw;
+	uint32_t link_bw;
+	uint32_t i;
+
+	req_bw = bandwidth_in_kbps_from_timing(
+			&stream->public.timing);
+
+	/* if a preferred setting is specified through AMDDP, use it
+	 * if it is enough to drive the mode
+	 */
+	link = stream->sink->link;
+
+	if ((link->public.reported_link_cap.lane_count != LANE_COUNT_UNKNOWN) &&
+		(link->public.reported_link_cap.link_rate <=
+				link->public.verified_link_cap.link_rate)) {
+
+		link_bw = bandwidth_in_kbps_from_link_settings(
+				&link->public.reported_link_cap);
+
+		if (req_bw < link_bw) {
+			*link_setting = link->public.reported_link_cap;
+			return;
+		}
+	}
+
+	/* search for first suitable setting for the requested
+	 * bandwidth
+	 */
+	for (i = 0; i < get_link_training_fallback_table_len(link); i++) {
+
+		cur_ls = get_link_training_fallback_table(link, i);
+
+		link_bw = bandwidth_in_kbps_from_link_settings(cur_ls);
+
+		if (req_bw < link_bw) {
+			if (is_link_setting_supported(
+				cur_ls,
+				&link->public.max_link_setting)) {
+				*link_setting = *cur_ls;
+				return;
+			}
+		}
+	}
+
+	BREAK_TO_DEBUGGER();
+	ASSERT(link->public.verified_link_cap.lane_count !=
+		LANE_COUNT_UNKNOWN);
+
+	*link_setting = link->public.verified_link_cap;
+}
+
+/*************************Short Pulse IRQ***************************/
+
+static bool hpd_rx_irq_check_link_loss_status(
+	struct core_link *link,
+	union hpd_irq_data *hpd_irq_dpcd_data)
+{
+	uint8_t irq_reg_rx_power_state;
+	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+	union lane_status lane_status;
+	uint32_t lane;
+	bool sink_status_changed;
+	bool return_code;
+
+	sink_status_changed = false;
+	return_code = false;
+
+	if (link->public.cur_link_settings.lane_count == 0)
+		return return_code;
+	/*1. Check that we can handle interrupt: Not in FS DOS,
+	 *  Not in "Display Timeout" state, Link is trained.
+	 */
+
+	dpcd_result = core_link_read_dpcd(link,
+		DPCD_ADDRESS_POWER_STATE,
+		&irq_reg_rx_power_state,
+		sizeof(irq_reg_rx_power_state));
+
+	if (dpcd_result != DC_OK) {
+		irq_reg_rx_power_state = DP_PWR_STATE_D0;
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: DPCD read failed to obtain power state.\n",
+			__func__);
+	}
+
+	if (irq_reg_rx_power_state == DP_PWR_STATE_D0) {
+
+		/*2. Check that Link Status changed, before re-training.*/
+
+		/*parse lane status*/
+		for (lane = 0;
+			lane < link->public.cur_link_settings.lane_count;
+			lane++) {
+
+			/* check status of lanes 0,1 (and 2,3) starting
+			 * from DpcdAddress_Lane01Status (0x202) */
+			lane_status.raw = get_nibble_at_index(
+				&hpd_irq_dpcd_data->bytes.lane01_status.raw,
+				lane);
+
+			if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+				!lane_status.bits.CR_DONE_0 ||
+				!lane_status.bits.SYMBOL_LOCKED_0) {
+				/* if channel equalization, clock recovery
+				 * or symbol lock is dropped, the link has
+				 * been lost: treat it as a sink status
+				 * change */
+				sink_status_changed = true;
+				break;
+			}
+
+		}
+
+		/* Check interlane align.*/
+		if (sink_status_changed ||
+			!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
+			INTERLANE_ALIGN_DONE) {
+
+			dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+				"%s: Link Status changed.\n",
+				__func__);
+
+			return_code = true;
+		}
+	}
+
+	return return_code;
+}
+
+static enum dc_status read_hpd_rx_irq_data(
+	struct core_link *link,
+	union hpd_irq_data *irq_data)
+{
+	/* The HW reads 16 bytes from 200h on HPD,
+	 * but if we get an AUX_DEFER, the HW cannot retry
+	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+	 * fail, so we now explicitly read 6 bytes, which is
+	 * what the above-mentioned test cases require.
+	 */
+	return core_link_read_dpcd(
+		link,
+		DPCD_ADDRESS_SINK_COUNT,
+		irq_data->raw,
+		sizeof(union hpd_irq_data));
+}
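+/* For reference, per the DisplayPort specification the 6 bytes starting at
+ * SINK_COUNT cover (union hpd_irq_data is expected to map onto these):
+ *	200h SINK_COUNT
+ *	201h DEVICE_SERVICE_IRQ_VECTOR
+ *	202h LANE0_1_STATUS
+ *	203h LANE2_3_STATUS
+ *	204h LANE_ALIGN_STATUS_UPDATED
+ *	205h SINK_STATUS
+ */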
+
+static bool allow_hpd_rx_irq(const struct core_link *link)
+{
+	/*
+	 * Don't handle RX IRQ unless one of following is met:
+	 * 1) The link is established (cur_link_settings != unknown)
+	 * 2) We kicked off MST detection
+	 * 3) We know we're dealing with an active dongle
+	 */
+
+	if ((link->public.cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+		(link->public.type == dc_connection_mst_branch) ||
+		is_dp_active_dongle(link))
+		return true;
+
+	return false;
+}
+
+static bool handle_hpd_irq_psr_sink(const struct core_link *link)
+{
+	union dpcd_psr_configuration psr_configuration;
+
+	if (link->public.psr_caps.psr_version == 0)
+		return false;
+
+	dal_ddc_service_read_dpcd_data(
+					link->ddc,
+					368 /*DpcdAddress_PSR_Enable_Cfg*/,
+					&psr_configuration.raw,
+					sizeof(psr_configuration.raw));
+
+	if (psr_configuration.bits.ENABLE) {
+		unsigned char dpcdbuf[3] = {0};
+		union psr_error_status psr_error_status;
+		union psr_sink_psr_status psr_sink_psr_status;
+
+		dal_ddc_service_read_dpcd_data(
+					link->ddc,
+					0x2006 /*DpcdAddress_PSR_Error_Status*/,
+					(unsigned char *) dpcdbuf,
+					sizeof(dpcdbuf));
+
+		/*DPCD 2006h   ERROR STATUS*/
+		psr_error_status.raw = dpcdbuf[0];
+		/*DPCD 2008h   SINK PANEL SELF REFRESH STATUS*/
+		psr_sink_psr_status.raw = dpcdbuf[2];
+
+		if (psr_error_status.bits.LINK_CRC_ERROR ||
+				psr_error_status.bits.RFB_STORAGE_ERROR) {
+			/* Acknowledge and clear error bits */
+			dal_ddc_service_write_dpcd_data(
+				link->ddc,
+				0x2006 /*DpcdAddress_PSR_Error_Status*/,
+				&psr_error_status.raw,
+				sizeof(psr_error_status.raw));
+
+			/* PSR error, disable and re-enable PSR */
+			dc_link_set_psr_enable(&link->public, false);
+			dc_link_set_psr_enable(&link->public, true);
+
+			return true;
+		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+			/* No error is detected; PSR is active.
+			 * We should return with IRQ_HPD handled without
+			 * checking for loss of sync since PSR would have
+			 * powered down main link.
+			 */
+			return true;
+		}
+	}
+	return false;
+}
+
+static void dp_test_send_link_training(struct core_link *link)
+{
+	struct dc_link_settings link_settings;
+
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_LANE_COUNT,
+			(unsigned char *)(&link_settings.lane_count),
+			1);
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_LINK_RATE,
+			(unsigned char *)(&link_settings.link_rate),
+			1);
+
+	/* Set preferred link settings */
+	link->public.verified_link_cap.lane_count = link_settings.lane_count;
+	link->public.verified_link_cap.link_rate = link_settings.link_rate;
+
+	dp_retrain_link(link);
+}
+
+static void dp_test_send_phy_test_pattern(struct core_link *link)
+{
+	union phy_test_pattern dpcd_test_pattern;
+	union lane_adjust dpcd_lane_adjustment[2];
+	unsigned char dpcd_post_cursor_2_adjustment = 0;
+	unsigned char test_80_bit_pattern[
+			(DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+			DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+	enum dp_test_pattern test_pattern;
+	struct dc_link_training_settings link_settings;
+	union lane_adjust dpcd_lane_adjust;
+	unsigned int lane;
+	struct link_training_settings link_training_settings;
+	int i = 0;
+
+	dpcd_test_pattern.raw = 0;
+	memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+	memset(&link_settings, 0, sizeof(link_settings));
+
+	/* get phy test pattern and pattern parameters from DP receiver */
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_PHY_PATTERN,
+			&dpcd_test_pattern.raw,
+			sizeof(dpcd_test_pattern));
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_ADJUST_REQUEST_LANE0_1,
+			&dpcd_lane_adjustment[0].raw,
+			sizeof(dpcd_lane_adjustment));
+
+	/* get post cursor 2 parameters
+	 * For DP 1.1a or earlier, this DPCD register's value is 0
+	 * For DP 1.2 or later:
+	 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+	 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+	 */
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_ADJUST_REQUEST_POST_CURSOR2,
+			&dpcd_post_cursor_2_adjustment,
+			sizeof(dpcd_post_cursor_2_adjustment));
+
+	/* translate request */
+	switch (dpcd_test_pattern.bits.PATTERN) {
+	case PHY_TEST_PATTERN_D10_2:
+		test_pattern = DP_TEST_PATTERN_D102;
+	break;
+	case PHY_TEST_PATTERN_SYMBOL_ERROR:
+		test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+	break;
+	case PHY_TEST_PATTERN_PRBS7:
+		test_pattern = DP_TEST_PATTERN_PRBS7;
+	break;
+	case PHY_TEST_PATTERN_80BIT_CUSTOM:
+		test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+	break;
+	case PHY_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+		test_pattern = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+	break;
+	default:
+		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+	break;
+	}
+
+	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
+		core_link_read_dpcd(
+				link,
+				DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0,
+				test_80_bit_pattern,
+				sizeof(test_80_bit_pattern));
+
+	/* prepare link training settings */
+	link_settings.link = link->public.cur_link_settings;
+
+	for (lane = 0; lane <
+		(unsigned int)(link->public.cur_link_settings.lane_count);
+		lane++) {
+		dpcd_lane_adjust.raw =
+			get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+		link_settings.lane_settings[lane].VOLTAGE_SWING =
+			(enum dc_voltage_swing)
+			(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+		link_settings.lane_settings[lane].PRE_EMPHASIS =
+			(enum dc_pre_emphasis)
+			(dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+		link_settings.lane_settings[lane].POST_CURSOR2 =
+			(enum dc_post_cursor2)
+			((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+	}
+
+	for (i = 0; i < LANE_COUNT_DP_MAX; i++)
+		link_training_settings.lane_settings[i] =
+				link_settings.lane_settings[i];
+	link_training_settings.link_settings = link_settings.link;
+	link_training_settings.allow_invalid_msa_timing_param = false;
+	/* Usage: measure the DP physical lane signal
+	 * with DP SI test equipment automatically.
+	 * The PHY test pattern request is generated by the equipment
+	 * via HPD interrupt, so HPD needs to stay active all the time;
+	 * do not touch it. Forward the request to DS.
+	 */
+	dc_link_dp_set_test_pattern(
+		&link->public,
+		test_pattern,
+		&link_training_settings,
+		test_80_bit_pattern,
+		(DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+		DPCD_ADDRESS_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
+}
+
+static void dp_test_send_link_test_pattern(struct core_link *link)
+{
+	union link_test_pattern dpcd_test_pattern;
+	union test_misc dpcd_test_params;
+	enum dp_test_pattern test_pattern;
+
+	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+	/* get link test pattern and pattern parameters */
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_PATTERN,
+			&dpcd_test_pattern.raw,
+			sizeof(dpcd_test_pattern));
+	core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_MISC1,
+			&dpcd_test_params.raw,
+			sizeof(dpcd_test_params));
+
+	switch (dpcd_test_pattern.bits.PATTERN) {
+	case LINK_TEST_PATTERN_COLOR_RAMP:
+		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+	break;
+	case LINK_TEST_PATTERN_VERTICAL_BARS:
+		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+	break; /* black and white */
+	case LINK_TEST_PATTERN_COLOR_SQUARES:
+		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+				TEST_DYN_RANGE_VESA ?
+				DP_TEST_PATTERN_COLOR_SQUARES :
+				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+	break;
+	default:
+		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+	break;
+	}
+
+	dc_link_dp_set_test_pattern(
+			&link->public,
+			test_pattern,
+			NULL,
+			NULL,
+			0);
+}
+
+static void handle_automated_test(struct core_link *link)
+{
+	union test_request test_request;
+	union test_response test_response;
+
+	memset(&test_request, 0, sizeof(test_request));
+	memset(&test_response, 0, sizeof(test_response));
+
+	core_link_read_dpcd(
+		link,
+		DPCD_ADDRESS_TEST_REQUEST,
+		&test_request.raw,
+		sizeof(union test_request));
+	if (test_request.bits.LINK_TRAINING) {
+		/* ACK first to let DP RX test box monitor LT sequence */
+		test_response.bits.ACK = 1;
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_RESPONSE,
+			&test_response.raw,
+			sizeof(test_response));
+		dp_test_send_link_training(link);
+		/* no acknowledge request is needed again */
+		test_response.bits.ACK = 0;
+	}
+	if (test_request.bits.LINK_TEST_PATTRN) {
+		dp_test_send_link_test_pattern(link);
+		link->public.compliance_test_state.bits.
+			SET_TEST_PATTERN_PENDING = 1;
+	}
+	if (test_request.bits.PHY_TEST_PATTERN) {
+		dp_test_send_phy_test_pattern(link);
+		test_response.bits.ACK = 1;
+	}
+	if (!test_request.raw)
+		/* no requests, revert all test signals
+		 * TODO: revert all test signals
+		 */
+		test_response.bits.ACK = 1;
+	/* send request acknowledgment */
+	if (test_response.bits.ACK)
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_TEST_RESPONSE,
+			&test_response.raw,
+			sizeof(test_response));
+}
+
+bool dc_link_handle_hpd_rx_irq(const struct dc_link *dc_link)
+{
+	struct core_link *link = DC_LINK_TO_LINK(dc_link);
+	union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+	union device_service_irq device_service_clear = {0};
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	bool status = false;
+	/* For use cases related to downstream connection status change,
+	 * PSR and device auto test, refer to function handle_sst_hpd_irq
+	 * in DAL2.1 */
+
+	dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+		"%s: Got short pulse HPD on link %d\n",
+		__func__, link->public.link_index);
+
+	/* All the "handle_hpd_irq_xxx()" methods
+	 * should be called only after
+	 * dal_dpsst_ls_read_hpd_irq_data().
+	 * The order of the calls is important too.
+	 */
+	result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+
+	if (result != DC_OK) {
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: DPCD read failed to obtain irq data\n",
+			__func__);
+		return false;
+	}
+
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+		device_service_clear.bits.AUTOMATED_TEST = 1;
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_DEVICE_SERVICE_IRQ_VECTOR,
+			&device_service_clear.raw,
+			sizeof(device_service_clear.raw));
+		device_service_clear.raw = 0;
+		handle_automated_test(link);
+		return false;
+	}
+
+	if (!allow_hpd_rx_irq(link)) {
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: skipping HPD handling on %d\n",
+			__func__, link->public.link_index);
+		return false;
+	}
+
+	if (handle_hpd_irq_psr_sink(link))
+		/* PSR-related error was detected and handled */
+		return true;
+
+	/* If PSR-related error handled, Main link may be off,
+	 * so do not handle as a normal sink status change interrupt.
+	 */
+
+	/* check if we have MST msg and return since we poll for it */
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.
+			bits.DOWN_REP_MSG_RDY ||
+		hpd_irq_dpcd_data.bytes.device_service_irq.
+			bits.UP_REQ_MSG_RDY)
+		return false;
+
+	/* For now we only handle the 'Downstream port status' case.
+	 * A sink count change means the downstream port status changed,
+	 * so DM should call DC to do the detection. */
+	if (hpd_rx_irq_check_link_loss_status(
+		link,
+		&hpd_irq_dpcd_data)) {
+		/* Connectivity log: link loss */
+		CONN_DATA_LINK_LOSS(link,
+					hpd_irq_dpcd_data.raw,
+					sizeof(hpd_irq_dpcd_data),
+					"Status: ");
+
+		perform_link_training_with_retries(link,
+			&link->public.cur_link_settings,
+			true, LINK_TRAINING_ATTEMPTS);
+
+		status = false;
+	}
+
+	if (link->public.type == dc_connection_active_dongle &&
+		hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+			!= link->dpcd_sink_count)
+		status = true;
+
+	/* reasons for HPD RX:
+	 * 1. Link Loss - i.e. re-train the link
+	 * 2. MST sideband message
+	 * 3. Automated Test - i.e. internal commit
+	 * 4. CP (copy protection) - (not interesting for DM???)
+	 * 5. DRR
+	 * 6. Downstream Port status changed - i.e. Detect.
+	 *    This is the only one interesting for DM
+	 *    because it must call dc_link_detect.
+	 */
+	return status;
+}
+
+/*query dpcd for version and mst cap addresses*/
+bool is_mst_supported(struct core_link *link)
+{
+	bool mst          = false;
+	enum dc_status st = DC_OK;
+	union dpcd_rev rev;
+	union mstm_cap cap;
+
+	rev.raw  = 0;
+	cap.raw  = 0;
+
+	st = core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV, &rev.raw,
+			sizeof(rev));
+
+	if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+		st = core_link_read_dpcd(link, DPCD_ADDRESS_MSTM_CAP,
+				&cap.raw, sizeof(cap));
+		if (st == DC_OK && cap.bits.MST_CAP == 1)
+			mst = true;
+	}
+	return mst;
+}
+
+bool is_dp_active_dongle(const struct core_link *link)
+{
+	enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
+
+	return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
+			(dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
+			(dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
+static void get_active_converter_info(
+	uint8_t data, struct core_link *link)
+{
+	union dp_downstream_port_present ds_port = { .byte = data };
+
+	/* decode converter info*/
+	if (!ds_port.fields.PORT_PRESENT) {
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		ddc_service_set_dongle_type(link->ddc,
+				link->dpcd_caps.dongle_type);
+		return;
+	}
+
+	switch (ds_port.fields.PORT_TYPE) {
+	case DOWNSTREAM_VGA:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+		break;
+	case DOWNSTREAM_DVI_HDMI:
+		/* At this point we don't know whether it is DVI or HDMI;
+		 * assume DVI. */
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+		break;
+	default:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		break;
+	}
+
+	if (link->dpcd_caps.dpcd_rev.raw >= DCS_DPCD_REV_11) {
+		uint8_t det_caps[4];
+		union dwnstream_port_caps_byte0 *port_caps =
+			(union dwnstream_port_caps_byte0 *)det_caps;
+		core_link_read_dpcd(link, DPCD_ADDRESS_DWN_STRM_PORT0_CAPS,
+				det_caps, sizeof(det_caps));
+
+		switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+		case DOWN_STREAM_DETAILED_VGA:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_VGA_CONVERTER;
+			break;
+		case DOWN_STREAM_DETAILED_DVI:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_DVI_CONVERTER;
+			break;
+		case DOWN_STREAM_DETAILED_HDMI:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+			if (ds_port.fields.DETAILED_CAPS) {
+
+				union dwnstream_port_caps_byte3_hdmi
+					hdmi_caps = {.raw = det_caps[3] };
+
+				link->dpcd_caps.is_dp_hdmi_s3d_converter =
+					hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+			}
+			break;
+		}
+	}
+
+	ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+	{
+		struct dp_device_vendor_id dp_id;
+
+		/* read IEEE branch device id */
+		core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_BRANCH_DEVICE_ID_START,
+			(uint8_t *)&dp_id,
+			sizeof(dp_id));
+
+		link->dpcd_caps.branch_dev_id =
+			(dp_id.ieee_oui[0] << 16) +
+			(dp_id.ieee_oui[1] << 8) +
+			dp_id.ieee_oui[2];
+
+		memmove(
+			link->dpcd_caps.branch_dev_name,
+			dp_id.ieee_device_id,
+			sizeof(dp_id.ieee_device_id));
+	}
+
+	{
+		struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+		core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_BRANCH_REVISION_START,
+			(uint8_t *)&dp_hw_fw_revision,
+			sizeof(dp_hw_fw_revision));
+
+		link->dpcd_caps.branch_hw_revision =
+			dp_hw_fw_revision.ieee_hw_rev;
+	}
+}
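+/* Worked example for the OUI packing above (illustrative, hypothetical
+ * values): ieee_oui = { 0x00, 0x22, 0xB9 } yields
+ * branch_dev_id = (0x00 << 16) + (0x22 << 8) + 0xB9 = 0x0022B9, the value
+ * later matched against the DP_BRANCH_DEVICE_ID_* constants.
+ */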
+
+static void dp_wa_power_up_0010FA(struct core_link *link, uint8_t *dpcd_data,
+		int length)
+{
+	int retry = 0;
+	union dp_downstream_port_present ds_port = { 0 };
+
+	if (!link->dpcd_caps.dpcd_rev.raw) {
+		do {
+			dp_receiver_power_ctrl(link, true);
+			core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV,
+							dpcd_data, length);
+			link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+				DPCD_ADDRESS_DPCD_REV -
+				DPCD_ADDRESS_DPCD_REV];
+		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+	}
+
+	ds_port.byte = dpcd_data[DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT -
+				 DPCD_ADDRESS_DPCD_REV];
+
+	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+		switch (link->dpcd_caps.branch_dev_id) {
+		/* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+		 * all internal circuits including AUX communication preventing
+		 * reading DPCD table and EDID (spec violation).
+		 * Encoder will skip DP RX power down on disable_output to
+		 * keep receiver powered all the time.*/
+		case DP_BRANCH_DEVICE_ID_1:
+		case DP_BRANCH_DEVICE_ID_4:
+			link->wa_flags.dp_keep_receiver_powered = true;
+			break;
+
+		/* TODO: May need work around for other dongles. */
+		default:
+			link->wa_flags.dp_keep_receiver_powered = false;
+			break;
+		}
+	} else {
+		link->wa_flags.dp_keep_receiver_powered = false;
+	}
+}
+
+static void retrieve_psr_link_cap(struct core_link *link,
+		enum edp_revision edp_revision)
+{
+	if (edp_revision >= EDP_REVISION_13) {
+		core_link_read_dpcd(link,
+				DPCD_ADDRESS_PSR_SUPPORT_VER,
+				(uint8_t *)(&link->public.psr_caps),
+				sizeof(link->public.psr_caps));
+		if (link->public.psr_caps.psr_version != 0) {
+			unsigned char psr_capability = 0;
+
+			core_link_read_dpcd(link,
+					DPCD_ADDRESS_PSR_CAPABILITY,
+						&psr_capability,
+						sizeof(psr_capability));
+			/* Bit 0 determines whether fast link training is
+			 * required on PSR exit. If set to 0, link training
+			 * is required. If set to 1, sink must lock within
+			 * five Idle Patterns after Main Link is turned on.
+			 */
+			link->public.psr_caps.psr_exit_link_training_required
+						= !(psr_capability & 0x1);
+
+			psr_capability = (psr_capability >> 1) & 0x7;
+			link->public.psr_caps.psr_rfb_setup_time =
+					55 * (6 - psr_capability);
+		}
+	}
+}
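+/* Worked example for the setup-time decode above (illustrative, assuming
+ * the eDP encoding where the unit is microseconds): a 3-bit field value of
+ * 0 gives 55 * (6 - 0) = 330 us, 5 gives 55 us and 6 gives 0 us.
+ */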
+
+static void retrieve_link_cap(struct core_link *link)
+{
+	uint8_t dpcd_data[DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL - DPCD_ADDRESS_DPCD_REV + 1];
+
+	union down_stream_port_count down_strm_port_count;
+	union edp_configuration_cap edp_config_cap;
+	union dp_downstream_port_present ds_port = { 0 };
+
+	memset(dpcd_data, '\0', sizeof(dpcd_data));
+	memset(&down_strm_port_count,
+		'\0', sizeof(union down_stream_port_count));
+	memset(&edp_config_cap, '\0',
+		sizeof(union edp_configuration_cap));
+
+	core_link_read_dpcd(
+		link,
+		DPCD_ADDRESS_DPCD_REV,
+		dpcd_data,
+		sizeof(dpcd_data));
+
+	link->dpcd_caps.dpcd_rev.raw =
+		dpcd_data[DPCD_ADDRESS_DPCD_REV - DPCD_ADDRESS_DPCD_REV];
+
+	{
+		union training_aux_rd_interval aux_rd_interval;
+
+		aux_rd_interval.raw =
+			dpcd_data[DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL -
+				DPCD_ADDRESS_DPCD_REV];
+
+		if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+			core_link_read_dpcd(
+				link,
+				DPCD_ADDRESS_DP13_DPCD_REV,
+				dpcd_data,
+				sizeof(dpcd_data));
+		}
+	}
+
+	ds_port.byte = dpcd_data[DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT -
+				 DPCD_ADDRESS_DPCD_REV];
+
+	get_active_converter_info(ds_port.byte, link);
+
+	dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+	/* DPCD 0x07 carries IGNORE_MSA_TIMING_PARAM; without this read,
+	 * down_strm_port_count stays zero-initialized. (The constant name
+	 * below is assumed to follow the DPCD_ADDRESS_* scheme used
+	 * elsewhere in this file.)
+	 */
+	down_strm_port_count.raw = dpcd_data[
+		DPCD_ADDRESS_DOWN_STREAM_PORT_COUNT - DPCD_ADDRESS_DPCD_REV];
+	link->dpcd_caps.allow_invalid_MSA_timing_param =
+		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+	link->dpcd_caps.max_ln_count.raw = dpcd_data[
+		DPCD_ADDRESS_MAX_LANE_COUNT - DPCD_ADDRESS_DPCD_REV];
+
+	link->dpcd_caps.max_down_spread.raw = dpcd_data[
+		DPCD_ADDRESS_MAX_DOWNSPREAD - DPCD_ADDRESS_DPCD_REV];
+
+	link->public.reported_link_cap.lane_count =
+		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+	link->public.reported_link_cap.link_rate = dpcd_data[
+		DPCD_ADDRESS_MAX_LINK_RATE - DPCD_ADDRESS_DPCD_REV];
+	link->public.reported_link_cap.link_spread =
+		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+	edp_config_cap.raw = dpcd_data[
+		DPCD_ADDRESS_EDP_CONFIG_CAP - DPCD_ADDRESS_DPCD_REV];
+	link->dpcd_caps.panel_mode_edp =
+		edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+
+	link->edp_revision = DPCD_EDP_REVISION_EDP_UNKNOWN;
+
+	link->public.test_pattern_enabled = false;
+	link->public.compliance_test_state.raw = 0;
+
+	link->public.psr_caps.psr_exit_link_training_required = false;
+	link->public.psr_caps.psr_frame_capture_indication_req = false;
+	link->public.psr_caps.psr_rfb_setup_time = 0;
+	link->public.psr_caps.psr_sdp_transmit_line_num_deadline = 0;
+	link->public.psr_caps.psr_version = 0;
+
+	/* read sink count */
+	core_link_read_dpcd(link,
+			DPCD_ADDRESS_SINK_COUNT,
+			&link->dpcd_caps.sink_count.raw,
+			sizeof(link->dpcd_caps.sink_count.raw));
+
+	/* Display control registers starting at DPCD 700h are only valid and
+	 * enabled if this eDP config cap bit is set. */
+	if (edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE) {
+		/* Read the Panel's eDP revision at DPCD 700h. */
+		core_link_read_dpcd(link,
+			DPCD_ADDRESS_EDP_REV,
+			(uint8_t *)(&link->edp_revision),
+			sizeof(link->edp_revision));
+	}
+
+	/* Connectivity log: detection */
+	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+
+	/* TODO: Confirm if need retrieve_psr_link_cap */
+	retrieve_psr_link_cap(link, link->edp_revision);
+}
+
+void detect_dp_sink_caps(struct core_link *link)
+{
+	retrieve_link_cap(link);
+
+	/* dc init_hw has power encoder using default
+	 * signal for connector. For native DP, no
+	 * need to power up encoder again. If not native
+	 * DP, hw_init may need check signal or power up
+	 * encoder here.
+	 */
+
+	if (is_mst_supported(link)) {
+		link->public.verified_link_cap = link->public.reported_link_cap;
+	} else {
+		dp_hbr_verify_link_cap(link,
+			&link->public.reported_link_cap);
+	}
+	/* TODO save sink caps in link->sink */
+}
+
+void dc_link_dp_enable_hpd(const struct dc_link *link)
+{
+	struct core_link *core_link = DC_LINK_TO_CORE(link);
+	struct link_encoder *encoder = core_link->link_enc;
+
+	if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+		encoder->funcs->enable_hpd(encoder);
+}
+
+void dc_link_dp_disable_hpd(const struct dc_link *link)
+{
+	struct core_link *core_link = DC_LINK_TO_CORE(link);
+	struct link_encoder *encoder = core_link->link_enc;
+
+	if (encoder != NULL && encoder->funcs->disable_hpd != NULL)
+		encoder->funcs->disable_hpd(encoder);
+}
+
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+	return test_pattern == DP_TEST_PATTERN_D102 ||
+		test_pattern == DP_TEST_PATTERN_SYMBOL_ERROR ||
+		test_pattern == DP_TEST_PATTERN_PRBS7 ||
+		test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
+		test_pattern == DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE ||
+		test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN1 ||
+		test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN2 ||
+		test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN3 ||
+		test_pattern == DP_TEST_PATTERN_TRAINING_PATTERN4 ||
+		test_pattern == DP_TEST_PATTERN_VIDEO_MODE;
+}
+
+static void set_crtc_test_pattern(struct core_link *link,
+				struct pipe_ctx *pipe_ctx,
+				enum dp_test_pattern test_pattern)
+{
+	enum controller_dp_test_pattern controller_test_pattern;
+	enum dc_color_depth color_depth = pipe_ctx->
+		stream->public.timing.display_color_depth;
+	struct bit_depth_reduction_params params;
+
+	memset(&params, 0, sizeof(params));
+
+	switch (test_pattern) {
+	case DP_TEST_PATTERN_COLOR_SQUARES:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+	break;
+	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+	break;
+	case DP_TEST_PATTERN_VERTICAL_BARS:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+	break;
+	case DP_TEST_PATTERN_HORIZONTAL_BARS:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+	break;
+	case DP_TEST_PATTERN_COLOR_RAMP:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+	break;
+	default:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+	break;
+	}
+
+	switch (test_pattern) {
+	case DP_TEST_PATTERN_COLOR_SQUARES:
+	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+	case DP_TEST_PATTERN_VERTICAL_BARS:
+	case DP_TEST_PATTERN_HORIZONTAL_BARS:
+	case DP_TEST_PATTERN_COLOR_RAMP:
+	{
+		/* disable bit depth reduction */
+		pipe_ctx->stream->bit_depth_params = params;
+		pipe_ctx->opp->funcs->
+			opp_program_bit_depth_reduction(pipe_ctx->opp, &params);
+
+		pipe_ctx->tg->funcs->set_test_pattern(pipe_ctx->tg,
+				controller_test_pattern, color_depth);
+	}
+	break;
+	case DP_TEST_PATTERN_VIDEO_MODE:
+	{
+		/* restore bitdepth reduction */
+		link->dc->current_context->res_ctx.pool->funcs->
+			build_bit_depth_reduction_params(pipe_ctx->stream,
+					&params);
+		pipe_ctx->stream->bit_depth_params = params;
+		pipe_ctx->opp->funcs->
+			opp_program_bit_depth_reduction(pipe_ctx->opp, &params);
+
+		pipe_ctx->tg->funcs->set_test_pattern(pipe_ctx->tg,
+				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+				color_depth);
+	}
+	break;
+
+	default:
+	break;
+	}
+}
+
+bool dc_link_dp_set_test_pattern(
+	const struct dc_link *link,
+	enum dp_test_pattern test_pattern,
+	const struct link_training_settings *p_link_settings,
+	const unsigned char *p_custom_pattern,
+	unsigned int cust_pattern_size)
+{
+	struct core_link *core_link = DC_LINK_TO_CORE(link);
+	struct pipe_ctx *pipes =
+			core_link->dc->current_context->res_ctx.pipe_ctx;
+	struct pipe_ctx pipe_ctx = pipes[0];
+	unsigned int lane;
+	unsigned int i;
+	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+	union dpcd_training_pattern training_pattern;
+	union test_response test_response;
+	enum dpcd_phy_test_patterns pattern;
+
+	memset(&training_pattern, 0, sizeof(training_pattern));
+	memset(&test_response, 0, sizeof(test_response));
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		/* a pipe may be unused; guard against a NULL stream */
+		if (pipes[i].stream != NULL &&
+				pipes[i].stream->sink->link == core_link) {
+			pipe_ctx = pipes[i];
+			break;
+		}
+	}
+
+	/* Reset the CRTC test pattern and the DP PHY test pattern
+	 * if either is currently running and the request is VideoMode.
+	 */
+	if (core_link->public.test_pattern_enabled && test_pattern ==
+			DP_TEST_PATTERN_VIDEO_MODE) {
+		/* Set CRTC Test Pattern */
+		set_crtc_test_pattern(core_link, &pipe_ctx, test_pattern);
+		dp_set_hw_test_pattern(core_link, test_pattern,
+				(uint8_t *)p_custom_pattern,
+				(uint32_t)cust_pattern_size);
+
+		/* Unblank Stream */
+		core_link->dc->hwss.unblank_stream(
+			&pipe_ctx,
+			&core_link->public.verified_link_cap);
+		/* TODO:m_pHwss->MuteAudioEndpoint
+		 * (pPathMode->pDisplayPath, false);
+		 */
+
+		/* Reset Test Pattern state */
+		core_link->public.test_pattern_enabled = false;
+
+		return true;
+	}
+
+	/* Check for PHY Test Patterns */
+	if (is_dp_phy_pattern(test_pattern)) {
+		/* Set DPCD Lane Settings before running test pattern */
+		if (p_link_settings != NULL) {
+			dp_set_hw_lane_settings(core_link, p_link_settings);
+			dpcd_set_lane_settings(core_link, p_link_settings);
+		}
+
+		/* Blank stream if running test pattern */
+		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+			/*TODO:
+			 * m_pHwss->
+			 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+			 */
+			/* Blank stream */
+			pipe_ctx.stream_enc->funcs->dp_blank(
+					pipe_ctx.stream_enc);
+		}
+
+		dp_set_hw_test_pattern(core_link, test_pattern,
+				(uint8_t *)p_custom_pattern,
+				(uint32_t)cust_pattern_size);
+
+		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+			/* Set Test Pattern state */
+			core_link->public.test_pattern_enabled = true;
+			if (p_link_settings != NULL)
+				dpcd_set_link_settings(core_link,
+						p_link_settings);
+		}
+
+		switch (test_pattern) {
+		case DP_TEST_PATTERN_VIDEO_MODE:
+			pattern = PHY_TEST_PATTERN_NONE;
+		break;
+		case DP_TEST_PATTERN_D102:
+			pattern = PHY_TEST_PATTERN_D10_2;
+		break;
+		case DP_TEST_PATTERN_SYMBOL_ERROR:
+			pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+		break;
+		case DP_TEST_PATTERN_PRBS7:
+			pattern = PHY_TEST_PATTERN_PRBS7;
+		break;
+		case DP_TEST_PATTERN_80BIT_CUSTOM:
+			pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+		break;
+		case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+			pattern = PHY_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+		break;
+		default:
+			return false;
+		}
+
+		if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+		/*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+			return false;
+
+		if (core_link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+			/* tell receiver that we are sending qualification
+			 * pattern DP 1.2 or later - DP receiver's link quality
+			 * pattern is set using DPCD LINK_QUAL_LANEx_SET
+			 * registers (0x10B~0x10E)
+			 */
+			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+				link_qual_pattern[lane] =
+						(unsigned char)(pattern);
+
+			core_link_write_dpcd(core_link,
+					DPCD_ADDRESS_LINK_QUAL_LANE0_SET,
+					link_qual_pattern,
+					sizeof(link_qual_pattern));
+		} else if (core_link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+				core_link->dpcd_caps.dpcd_rev.raw == 0) {
+			/* tell receiver that we are sending qualification
+			 * pattern DP 1.1a or earlier - DP receiver's link
+			 * quality pattern is set using
+			 * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+			 * register (0x102). We will use v_1.3 when we are
+			 * setting test pattern for DP 1.1.
+			 */
+			core_link_read_dpcd(core_link,
+					DPCD_ADDRESS_TRAINING_PATTERN_SET,
+					&training_pattern.raw,
+					sizeof(training_pattern));
+			training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+			core_link_write_dpcd(core_link,
+					DPCD_ADDRESS_TRAINING_PATTERN_SET,
+					&training_pattern.raw,
+					sizeof(training_pattern));
+		}
+	} else {
+		/* CRTC patterns */
+		set_crtc_test_pattern(core_link, &pipe_ctx, test_pattern);
+		/* Set Test Pattern state */
+		core_link->public.test_pattern_enabled = true;
+
+		/* If this is called because of compliance test request,
+		 * we respond ack here.
+		 */
+		if (core_link->public.compliance_test_state.bits.
+				SET_TEST_PATTERN_PENDING == 1) {
+			core_link->public.compliance_test_state.bits.
+						SET_TEST_PATTERN_PENDING = 0;
+			test_response.bits.ACK = 1;
+			core_link_write_dpcd(core_link,
+					DPCD_ADDRESS_TEST_RESPONSE,
+					&test_response.raw,
+					sizeof(test_response));
+		}
+	}
+
+	return true;
+}

+ 222 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c

@@ -0,0 +1,222 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+
+
+#include "dm_services.h"
+#include "dc.h"
+#include "inc/core_dc.h"
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+#include "link_hwss.h"
+#include "hw_sequencer.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+
+enum dc_status core_link_read_dpcd(
+	struct core_link *link,
+	uint32_t address,
+	uint8_t *data,
+	uint32_t size)
+{
+	if (!dm_helpers_dp_read_dpcd(link->ctx,
+			&link->public,
+			address, data, size))
+		return DC_ERROR_UNEXPECTED;
+
+	return DC_OK;
+}
+
+enum dc_status core_link_write_dpcd(
+	struct core_link *link,
+	uint32_t address,
+	const uint8_t *data,
+	uint32_t size)
+{
+	if (!dm_helpers_dp_write_dpcd(link->ctx,
+			&link->public,
+			address, data, size))
+		return DC_ERROR_UNEXPECTED;
+
+	return DC_OK;
+}
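+/* A minimal usage sketch for the wrappers above (illustrative, not a
+ * pattern mandated elsewhere in this patch): callers check the dc_status
+ * result and fall back on AUX failure, e.g.
+ *
+ *	uint8_t rev = 0;
+ *
+ *	if (core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV,
+ *			&rev, sizeof(rev)) != DC_OK)
+ *		rev = 0;	(treat an AUX failure as revision unknown)
+ */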
+
+void dp_receiver_power_ctrl(struct core_link *link, bool on)
+{
+	uint8_t state;
+
+	state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+	core_link_write_dpcd(link, DPCD_ADDRESS_POWER_STATE, &state,
+			sizeof(state));
+}
+
+void dp_enable_link_phy(
+	struct core_link *link,
+	enum signal_type signal,
+	enum clock_source_id clock_source,
+	const struct dc_link_settings *link_settings)
+{
+	struct link_encoder *link_enc = link->link_enc;
+
+	if (dc_is_dp_sst_signal(signal)) {
+		if (signal == SIGNAL_TYPE_EDP) {
+			link_enc->funcs->power_control(link_enc, true);
+			link_enc->funcs->backlight_control(link_enc, true);
+		}
+
+		link_enc->funcs->enable_dp_output(
+						link_enc,
+						link_settings,
+						clock_source);
+	} else {
+		link_enc->funcs->enable_dp_mst_output(
+						link_enc,
+						link_settings,
+						clock_source);
+	}
+
+	dp_receiver_power_ctrl(link, true);
+}
+
+void dp_disable_link_phy(struct core_link *link, enum signal_type signal)
+{
+	if (!link->wa_flags.dp_keep_receiver_powered)
+		dp_receiver_power_ctrl(link, false);
+
+	if (signal == SIGNAL_TYPE_EDP)
+		link->link_enc->funcs->backlight_control(link->link_enc, false);
+
+	link->link_enc->funcs->disable_output(link->link_enc, signal);
+
+	/* Clear current link setting.*/
+	memset(&link->public.cur_link_settings, 0,
+			sizeof(link->public.cur_link_settings));
+}
+
+void dp_disable_link_phy_mst(struct core_link *link, enum signal_type signal)
+{
+	/* MST disable link only when no stream use the link */
+	if (link->mst_stream_alloc_table.stream_count > 0)
+		return;
+
+	dp_disable_link_phy(link, signal);
+}
+
+bool dp_set_hw_training_pattern(
+	struct core_link *link,
+	enum hw_dp_training_pattern pattern)
+{
+	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+	switch (pattern) {
+	case HW_DP_TRAINING_PATTERN_1:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+		break;
+	case HW_DP_TRAINING_PATTERN_2:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+		break;
+	case HW_DP_TRAINING_PATTERN_3:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+		break;
+	case HW_DP_TRAINING_PATTERN_4:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+		break;
+	default:
+		break;
+	}
+
+	dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+
+	return true;
+}
+
+void dp_set_hw_lane_settings(
+	struct core_link *link,
+	const struct link_training_settings *link_settings)
+{
+	struct link_encoder *encoder = link->link_enc;
+
+	/* call Encoder to set lane settings */
+	encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct core_link *link)
+{
+	/* We need to explicitly check that connector
+	 * is not DP. Some Travis_VGA get reported
+	 * by video bios as DP.
+	 */
+	if (link->public.connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+		switch (link->dpcd_caps.branch_dev_id) {
+		case DP_BRANCH_DEVICE_ID_2:
+			if (strncmp(link->dpcd_caps.branch_dev_name,
+				DP_VGA_LVDS_CONVERTER_ID_2,
+				sizeof(link->dpcd_caps.branch_dev_name)) == 0)
+				return DP_PANEL_MODE_SPECIAL;
+			break;
+		case DP_BRANCH_DEVICE_ID_3:
+			if (strncmp(link->dpcd_caps.branch_dev_name,
+				DP_VGA_LVDS_CONVERTER_ID_3,
+				sizeof(link->dpcd_caps.branch_dev_name)) == 0)
+				return DP_PANEL_MODE_SPECIAL;
+			break;
+		default:
+			break;
+		}
+
+		if (link->dpcd_caps.panel_mode_edp)
+			return DP_PANEL_MODE_EDP;
+	}
+
+	return DP_PANEL_MODE_DEFAULT;
+}
+
+void dp_set_hw_test_pattern(
+	struct core_link *link,
+	enum dp_test_pattern test_pattern,
+	uint8_t *custom_pattern,
+	uint32_t custom_pattern_size)
+{
+	struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+	struct link_encoder *encoder = link->link_enc;
+
+	pattern_param.dp_phy_pattern = test_pattern;
+	pattern_param.custom_pattern = custom_pattern;
+	pattern_param.custom_pattern_size = custom_pattern_size;
+	pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+	encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+}
+
+void dp_retrain_link(struct core_link *link)
+{
+	struct pipe_ctx *pipes = link->dc->current_context->res_ctx.pipe_ctx;
+	unsigned int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (pipes[i].stream_enc != NULL) {
+			dm_delay_in_microseconds(link->ctx, 100);
+			pipes[i].stream_enc->funcs->dp_blank(
+					pipes[i].stream_enc);
+			link->dc->hwss.disable_stream(&pipes[i]);
+			dc_link_dp_perform_link_training(
+					&link->public,
+					&link->public.verified_link_cap,
+					true);
+			link->dc->hwss.enable_stream(&pipes[i]);
+			link->dc->hwss.unblank_stream(&pipes[i],
+					&link->public.verified_link_cap);
+		}
+	}
+}

+ 1934 - 0
drivers/gpu/drm/amd/display/dc/core/dc_resource.c

@@ -0,0 +1,1934 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "opp.h"
+#include "timing_generator.h"
+#include "transform.h"
+#include "set_mode_types.h"
+
+#include "virtual/virtual_stream_encoder.h"
+
+#include "dce80/dce80_resource.h"
+#include "dce100/dce100_resource.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+
+enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+{
+	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+	switch (asic_id.chip_family) {
+	case FAMILY_CI:
+	case FAMILY_KV:
+		dc_version = DCE_VERSION_8_0;
+		break;
+	case FAMILY_CZ:
+		dc_version = DCE_VERSION_11_0;
+		break;
+
+	case FAMILY_VI:
+		if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
+				ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
+			dc_version = DCE_VERSION_10_0;
+			break;
+		}
+		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
+				ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) {
+			dc_version = DCE_VERSION_11_2;
+		}
+		break;
+	default:
+		dc_version = DCE_VERSION_UNKNOWN;
+		break;
+	}
+	return dc_version;
+}
+
+struct resource_pool *dc_create_resource_pool(
+				struct core_dc *dc,
+				int num_virtual_links,
+				enum dce_version dc_version,
+				struct hw_asic_id asic_id)
+{
+	switch (dc_version) {
+	case DCE_VERSION_8_0:
+		return dce80_create_resource_pool(
+			num_virtual_links, dc);
+	case DCE_VERSION_10_0:
+		return dce100_create_resource_pool(
+				num_virtual_links, dc);
+	case DCE_VERSION_11_0:
+		return dce110_create_resource_pool(
+			num_virtual_links, dc, asic_id);
+	case DCE_VERSION_11_2:
+		return dce112_create_resource_pool(
+			num_virtual_links, dc);
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+void dc_destroy_resource_pool(struct core_dc *dc)
+{
+	if (dc) {
+		if (dc->res_pool)
+			dc->res_pool->funcs->destroy(&dc->res_pool);
+
+		if (dc->hwseq)
+			dm_free(dc->hwseq);
+	}
+}
+
+static void update_num_audio(
+	const struct resource_straps *straps,
+	unsigned int *num_audio,
+	struct audio_support *aud_support)
+{
+	if (straps->hdmi_disable == 0) {
+		aud_support->hdmi_audio_native = true;
+		aud_support->hdmi_audio_on_dongle = true;
+		aud_support->dp_audio = true;
+	} else {
+		if (straps->dc_pinstraps_audio & 0x2) {
+			aud_support->hdmi_audio_on_dongle = true;
+			aud_support->dp_audio = true;
+		} else {
+			aud_support->dp_audio = true;
+		}
+	}
+
+	switch (straps->audio_stream_number) {
+	case 0: /* multi streams supported */
+		break;
+	case 1: /* multi streams not supported */
+		*num_audio = 1;
+		break;
+	default:
+		DC_ERR("DC: unexpected audio fuse!\n");
+	}
+}
+
+bool resource_construct(
+	unsigned int num_virtual_links,
+	struct core_dc *dc,
+	struct resource_pool *pool,
+	const struct resource_create_funcs *create_funcs)
+{
+	struct dc_context *ctx = dc->ctx;
+	const struct resource_caps *caps = pool->res_cap;
+	int i;
+	unsigned int num_audio = caps->num_audio;
+	struct resource_straps straps = {0};
+
+	if (create_funcs->read_dce_straps)
+		create_funcs->read_dce_straps(dc->ctx, &straps);
+
+	pool->audio_count = 0;
+	if (create_funcs->create_audio) {
+		/* find the total number of streams available via the
+		 * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+		 * registers (one for each pin) starting from pin 1
+		 * up to the max number of audio pins.
+		 * We stop on the first pin where
+		 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+		 */
+		update_num_audio(&straps, &num_audio, &pool->audio_support);
+		for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+			struct audio *aud = create_funcs->create_audio(ctx, i);
+
+			if (aud == NULL) {
+				DC_ERR("DC: failed to create audio!\n");
+				return false;
+			}
+
+			if (!aud->funcs->endpoint_valid(aud)) {
+				aud->funcs->destroy(&aud);
+				break;
+			}
+
+			pool->audios[i] = aud;
+			pool->audio_count++;
+		}
+	}
+
+	pool->stream_enc_count = 0;
+	if (create_funcs->create_stream_encoder) {
+		for (i = 0; i < caps->num_stream_encoder; i++) {
+			pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
+			if (pool->stream_enc[i] == NULL) {
+				DC_ERR("DC: failed to create stream_encoder!\n");
+				return false;
+			}
+			pool->stream_enc_count++;
+		}
+	}
+
+	for (i = 0; i < num_virtual_links; i++) {
+		pool->stream_enc[pool->stream_enc_count] =
+			virtual_stream_encoder_create(
+					ctx, ctx->dc_bios);
+		if (pool->stream_enc[pool->stream_enc_count] == NULL) {
+			DC_ERR("DC: failed to create stream_encoder!\n");
+			return false;
+		}
+		pool->stream_enc_count++;
+	}
+
+	dc->hwseq = create_funcs->create_hwseq(ctx);
+	if (dc->hwseq == NULL) {
+		DC_ERR("DC: failed to create hardware sequencer!\n");
+		return false;
+	}
+
+	return true;
+}
+
+void resource_unreference_clock_source(
+		struct resource_context *res_ctx,
+		struct clock_source *clock_source)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
+		if (res_ctx->pool->clock_sources[i] != clock_source)
+			continue;
+
+		res_ctx->clock_source_ref_count[i]--;
+
+		if (res_ctx->clock_source_ref_count[i] == 0)
+			clock_source->funcs->cs_power_down(clock_source);
+
+		break;
+	}
+
+	if (res_ctx->pool->dp_clock_source == clock_source) {
+		res_ctx->dp_clock_source_ref_count--;
+
+		if (res_ctx->dp_clock_source_ref_count == 0)
+			clock_source->funcs->cs_power_down(clock_source);
+	}
+}
+
+void resource_reference_clock_source(
+		struct resource_context *res_ctx,
+		struct clock_source *clock_source)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
+		if (res_ctx->pool->clock_sources[i] != clock_source)
+			continue;
+
+		res_ctx->clock_source_ref_count[i]++;
+		break;
+	}
+
+	if (res_ctx->pool->dp_clock_source == clock_source)
+		res_ctx->dp_clock_source_ref_count++;
+}
+
+bool resource_are_streams_timing_synchronizable(
+	const struct core_stream *stream1,
+	const struct core_stream *stream2)
+{
+	if (stream1->public.timing.h_total != stream2->public.timing.h_total)
+		return false;
+
+	if (stream1->public.timing.v_total != stream2->public.timing.v_total)
+		return false;
+
+	if (stream1->public.timing.h_addressable
+				!= stream2->public.timing.h_addressable)
+		return false;
+
+	if (stream1->public.timing.v_addressable
+				!= stream2->public.timing.v_addressable)
+		return false;
+
+	if (stream1->public.timing.pix_clk_khz
+				!= stream2->public.timing.pix_clk_khz)
+		return false;
+
+	if (stream1->phy_pix_clk != stream2->phy_pix_clk
+			&& !dc_is_dp_signal(stream1->signal)
+			&& !dc_is_dp_signal(stream2->signal))
+		return false;
+
+	return true;
+}
+
+static bool is_sharable_clk_src(
+	const struct pipe_ctx *pipe_with_clk_src,
+	const struct pipe_ctx *pipe)
+{
+	if (pipe_with_clk_src->clock_source == NULL)
+		return false;
+
+	if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
+		return false;
+
+	if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+		return false;
+
+	if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+			&& dc_is_dvi_signal(pipe->stream->signal))
+		return false;
+
+	if (dc_is_hdmi_signal(pipe->stream->signal)
+			&& dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+		return false;
+
+	if (!resource_are_streams_timing_synchronizable(
+			pipe_with_clk_src->stream, pipe->stream))
+		return false;
+
+	return true;
+}
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+					struct resource_context *res_ctx,
+					struct pipe_ctx *pipe_ctx)
+{
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
+			return res_ctx->pipe_ctx[i].clock_source;
+	}
+
+	return NULL;
+}
+
+static enum pixel_format convert_pixel_format_to_dalsurface(
+		enum surface_pixel_format surface_pixel_format)
+{
+	enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+
+	switch (surface_pixel_format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+		dal_pixel_format = PIXEL_FORMAT_INDEX8;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+		dal_pixel_format = PIXEL_FORMAT_RGB565;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		dal_pixel_format = PIXEL_FORMAT_RGB565;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+		dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
+		dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+		dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+		dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+		dal_pixel_format = PIXEL_FORMAT_FP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+		dal_pixel_format = PIXEL_FORMAT_420BPP12;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		dal_pixel_format = PIXEL_FORMAT_420BPP12;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	default:
+		dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+		break;
+	}
+	return dal_pixel_format;
+}
+
+static void rect_swap_helper(struct rect *rect)
+{
+	uint32_t temp = 0;
+
+	temp = rect->height;
+	rect->height = rect->width;
+	rect->width = temp;
+
+	temp = rect->x;
+	rect->x = rect->y;
+	rect->y = temp;
+}
+
+static void calculate_viewport(
+		const struct dc_surface *surface,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct rect stream_src = pipe_ctx->stream->public.src;
+	struct rect src = surface->src_rect;
+	struct rect dst = surface->dst_rect;
+	struct rect surface_clip = surface->clip_rect;
+	struct rect clip = {0};
+
+	if (surface->rotation == ROTATION_ANGLE_90 ||
+	    surface->rotation == ROTATION_ANGLE_270) {
+		rect_swap_helper(&src);
+		rect_swap_helper(&dst);
+		rect_swap_helper(&surface_clip);
+		rect_swap_helper(&stream_src);
+	}
+
+	/* The actual clip is an intersection between stream
+	 * source and surface clip
+	 */
+	clip.x = stream_src.x > surface_clip.x ?
+			stream_src.x : surface_clip.x;
+
+	clip.width = stream_src.x + stream_src.width <
+			surface_clip.x + surface_clip.width ?
+			stream_src.x + stream_src.width - clip.x :
+			surface_clip.x + surface_clip.width - clip.x;
+
+	clip.y = stream_src.y > surface_clip.y ?
+			stream_src.y : surface_clip.y;
+
+	clip.height = stream_src.y + stream_src.height <
+			surface_clip.y + surface_clip.height ?
+			stream_src.y + stream_src.height - clip.y :
+			surface_clip.y + surface_clip.height - clip.y;
+
+	/* offset = src.ofs + (clip.ofs - dst.ofs) * scl_ratio
+	 * num_pixels = clip.num_pix * scl_ratio
+	 */
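+	/* Illustrative numbers: with src = {0, 0, 1920, 1080} scaled to
+	 * dst = {0, 0, 960, 540} and clip = {100, 0, 860, 540}, the ratio
+	 * src.width / dst.width is 2, so viewport.x = 0 + (100 - 0) * 2 = 200
+	 * and viewport.width = 860 * 2 = 1720.
+	 */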
+	pipe_ctx->scl_data.viewport.x = src.x + (clip.x - dst.x) *
+			src.width / dst.width;
+	pipe_ctx->scl_data.viewport.width = clip.width *
+			src.width / dst.width;
+
+	pipe_ctx->scl_data.viewport.y = src.y + (clip.y - dst.y) *
+			src.height / dst.height;
+	pipe_ctx->scl_data.viewport.height = clip.height *
+			src.height / dst.height;
+
+	/* Minimum viewport such that 420/422 chroma vp is non 0 */
+	if (pipe_ctx->scl_data.viewport.width < 2)
+		pipe_ctx->scl_data.viewport.width = 2;
+	if (pipe_ctx->scl_data.viewport.height < 2)
+		pipe_ctx->scl_data.viewport.height = 2;
+}
+
+static void calculate_recout(
+		const struct dc_surface *surface,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	struct rect clip = surface->clip_rect;
+
+	pipe_ctx->scl_data.recout.x = stream->public.dst.x;
+	if (stream->public.src.x < clip.x)
+		pipe_ctx->scl_data.recout.x += (clip.x
+			- stream->public.src.x) * stream->public.dst.width
+						/ stream->public.src.width;
+
+	pipe_ctx->scl_data.recout.width = clip.width *
+			stream->public.dst.width / stream->public.src.width;
+	if (pipe_ctx->scl_data.recout.width + pipe_ctx->scl_data.recout.x >
+			stream->public.dst.x + stream->public.dst.width)
+		pipe_ctx->scl_data.recout.width =
+			stream->public.dst.x + stream->public.dst.width
+						- pipe_ctx->scl_data.recout.x;
+
+	pipe_ctx->scl_data.recout.y = stream->public.dst.y;
+	if (stream->public.src.y < clip.y)
+		pipe_ctx->scl_data.recout.y += (clip.y
+			- stream->public.src.y) * stream->public.dst.height
+						/ stream->public.src.height;
+
+	pipe_ctx->scl_data.recout.height = clip.height *
+			stream->public.dst.height / stream->public.src.height;
+	if (pipe_ctx->scl_data.recout.height + pipe_ctx->scl_data.recout.y >
+			stream->public.dst.y + stream->public.dst.height)
+		pipe_ctx->scl_data.recout.height =
+			stream->public.dst.y + stream->public.dst.height
+						- pipe_ctx->scl_data.recout.y;
+}
+
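+/* Hypothetical example: a surface src_rect 1920 wide scaled to a dst_rect
+ * 960 wide, with stream src == dst, gives ratios.horz = 1920/960 = 2.0 in
+ * fixed-point 31.32; for PIXEL_FORMAT_420BPP12 the chroma ratio horz_c is
+ * then halved to 1.0.
+ */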
+static void calculate_scaling_ratios(
+		const struct dc_surface *surface,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	const uint32_t in_w = stream->public.src.width;
+	const uint32_t in_h = stream->public.src.height;
+	const uint32_t out_w = stream->public.dst.width;
+	const uint32_t out_h = stream->public.dst.height;
+
+	pipe_ctx->scl_data.ratios.horz = dal_fixed31_32_from_fraction(
+					surface->src_rect.width,
+					surface->dst_rect.width);
+	pipe_ctx->scl_data.ratios.vert = dal_fixed31_32_from_fraction(
+					surface->src_rect.height,
+					surface->dst_rect.height);
+
+	if (surface->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE)
+		pipe_ctx->scl_data.ratios.horz.value *= 2;
+	else if (surface->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM)
+		pipe_ctx->scl_data.ratios.vert.value *= 2;
+
+	pipe_ctx->scl_data.ratios.vert.value = div64_s64(
+		pipe_ctx->scl_data.ratios.vert.value * in_h, out_h);
+	pipe_ctx->scl_data.ratios.horz.value = div64_s64(
+		pipe_ctx->scl_data.ratios.horz.value * in_w, out_w);
+
+	pipe_ctx->scl_data.ratios.horz_c = pipe_ctx->scl_data.ratios.horz;
+	pipe_ctx->scl_data.ratios.vert_c = pipe_ctx->scl_data.ratios.vert;
+
+	if (pipe_ctx->scl_data.format == PIXEL_FORMAT_420BPP12) {
+		pipe_ctx->scl_data.ratios.horz_c.value /= 2;
+		pipe_ctx->scl_data.ratios.vert_c.value /= 2;
+	}
+}
+
+bool resource_build_scaling_params(
+	const struct dc_surface *surface,
+	struct pipe_ctx *pipe_ctx)
+{
+	bool res;
+	struct dc_crtc_timing *timing = &pipe_ctx->stream->public.timing;
+	/* Important: the calculation order matters. Scaling ratio calculation
+	 * requires the pixel format, lb depth calculation requires the recout,
+	 * and tap calculation requires the scaling ratios.
+	 */
+	pipe_ctx->scl_data.format = convert_pixel_format_to_dalsurface(surface->format);
+
+	calculate_viewport(surface, pipe_ctx);
+
+	if (pipe_ctx->scl_data.viewport.height < 16 || pipe_ctx->scl_data.viewport.width < 16)
+		return false;
+
+	calculate_scaling_ratios(surface, pipe_ctx);
+
+	calculate_recout(surface, pipe_ctx);
+
+	/*
+	 * Setting line buffer pixel depth to 24bpp yields banding
+	 * on certain displays, such as the Sharp 4k
+	 */
+	pipe_ctx->scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+
+	pipe_ctx->scl_data.h_active = timing->h_addressable;
+	pipe_ctx->scl_data.v_active = timing->v_addressable;
+
+	/* Taps calculations */
+	res = pipe_ctx->xfm->funcs->transform_get_optimal_number_of_taps(
+		pipe_ctx->xfm, &pipe_ctx->scl_data, &surface->scaling_quality);
+
+	if (!res) {
+		/* Try 24 bpp linebuffer */
+		pipe_ctx->scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
+
+		res = pipe_ctx->xfm->funcs->transform_get_optimal_number_of_taps(
+			pipe_ctx->xfm, &pipe_ctx->scl_data, &surface->scaling_quality);
+	}
+
+	dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
+				"%s: Viewport:\nheight:%d width:%d x:%d "
+				"y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+				"y:%d\n",
+				__func__,
+				pipe_ctx->scl_data.viewport.height,
+				pipe_ctx->scl_data.viewport.width,
+				pipe_ctx->scl_data.viewport.x,
+				pipe_ctx->scl_data.viewport.y,
+				surface->dst_rect.height,
+				surface->dst_rect.width,
+				surface->dst_rect.x,
+				surface->dst_rect.y);
+
+	return res;
+}
+
+enum dc_status resource_build_scaling_params_for_context(
+	const struct core_dc *dc,
+	struct validate_context *context)
+{
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (context->res_ctx.pipe_ctx[i].surface != NULL &&
+				context->res_ctx.pipe_ctx[i].stream != NULL)
+			if (!resource_build_scaling_params(
+				&context->res_ctx.pipe_ctx[i].surface->public,
+				&context->res_ctx.pipe_ctx[i]))
+				return DC_FAIL_BANDWIDTH_VALIDATE;
+	}
+
+	return DC_OK;
+}
+
+static void detach_surfaces_for_target(
+		struct validate_context *context,
+		const struct dc_target *dc_target)
+{
+	int i;
+	struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
+
+	for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+		struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
+		if (cur_pipe->stream == stream) {
+			cur_pipe->surface = NULL;
+			cur_pipe->top_pipe = NULL;
+			cur_pipe->bottom_pipe = NULL;
+		}
+	}
+}
+
+struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx)
+{
+	int i;
+	struct pipe_ctx *secondary_pipe = NULL;
+
+	/*
+	 * search backwards for the second pipe to keep pipe
+	 * assignment more consistent
+	 */
+
+	for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
+		if (res_ctx->pipe_ctx[i].stream == NULL) {
+			secondary_pipe = &res_ctx->pipe_ctx[i];
+			secondary_pipe->pipe_idx = i;
+			break;
+		}
+	}
+
+
+}
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+		struct resource_context *res_ctx,
+		const struct core_stream *stream)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->pipe_count; i++) {
+		if (res_ctx->pipe_ctx[i].stream == stream &&
+				!res_ctx->pipe_ctx[i].top_pipe) {
+			return &res_ctx->pipe_ctx[i];
+		}
+	}
+	return NULL;
+}
+
+/*
+ * A free_pipe for a target is defined here as a pipe with a stream that belongs
+ * to the target but has no surface attached yet
+ */
+static struct pipe_ctx *acquire_free_pipe_for_target(
+		struct resource_context *res_ctx,
+		const struct dc_target *dc_target)
+{
+	int i;
+	struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
+
+	struct pipe_ctx *head_pipe = NULL;
+
+	/* Find head pipe, which has the back end set up */
+
+	head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+	if (!head_pipe) {
+		ASSERT(0);
+		return NULL;
+	}
+
+	if (!head_pipe->surface)
+		return head_pipe;
+
+	/* Re-use pipe already acquired for this stream if available */
+	for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
+		if (res_ctx->pipe_ctx[i].stream == stream &&
+				!res_ctx->pipe_ctx[i].surface) {
+			return &res_ctx->pipe_ctx[i];
+		}
+	}
+
+	/*
+	 * At this point we have no re-useable pipe for this stream and we need
+	 * to acquire an idle one to satisfy the request
+	 */
+
+	if (!res_ctx->pool->funcs->acquire_idle_pipe_for_layer)
+		return NULL;
+
+	return res_ctx->pool->funcs->acquire_idle_pipe_for_layer(res_ctx, stream);
+}
+
+static void release_free_pipes_for_target(
+		struct resource_context *res_ctx,
+		const struct dc_target *dc_target)
+{
+	int i;
+	struct core_stream *stream = DC_STREAM_TO_CORE(dc_target->streams[0]);
+
+	for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
+		if (res_ctx->pipe_ctx[i].stream == stream &&
+				!res_ctx->pipe_ctx[i].surface) {
+			res_ctx->pipe_ctx[i].stream = NULL;
+		}
+	}
+}
+
+bool resource_attach_surfaces_to_context(
+		const struct dc_surface * const *surfaces,
+		int surface_count,
+		const struct dc_target *dc_target,
+		struct validate_context *context)
+{
+	int i;
+	struct pipe_ctx *tail_pipe;
+	struct dc_target_status *target_status = NULL;
+
+	if (surface_count > MAX_SURFACE_NUM) {
+		dm_error("Surface: can not attach %d surfaces! Maximum is: %d\n",
+			surface_count, MAX_SURFACE_NUM);
+		return false;
+	}
+
+	for (i = 0; i < context->target_count; i++)
+		if (&context->targets[i]->public == dc_target) {
+			target_status = &context->target_status[i];
+			break;
+		}
+	if (target_status == NULL) {
+		dm_error("Existing target not found; failed to attach surfaces\n");
+		return false;
+	}
+
+	/* retain new surfaces */
+	for (i = 0; i < surface_count; i++)
+		dc_surface_retain(surfaces[i]);
+
+	detach_surfaces_for_target(context, dc_target);
+
+	/* release existing surfaces */
+	for (i = 0; i < target_status->surface_count; i++)
+		dc_surface_release(target_status->surfaces[i]);
+
+	for (i = surface_count; i < target_status->surface_count; i++)
+		target_status->surfaces[i] = NULL;
+
+	target_status->surface_count = 0;
+
+	if (surface_count == 0)
+		return true;
+
+	tail_pipe = NULL;
+	for (i = 0; i < surface_count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
+		struct pipe_ctx *free_pipe = acquire_free_pipe_for_target(
+				&context->res_ctx, dc_target);
+
+		if (!free_pipe) {
+			target_status->surfaces[i] = NULL;
+			return false;
+		}
+
+		free_pipe->surface = surface;
+
+		if (tail_pipe) {
+			free_pipe->top_pipe = tail_pipe;
+			tail_pipe->bottom_pipe = free_pipe;
+		}
+
+		tail_pipe = free_pipe;
+	}
+
+	release_free_pipes_for_target(&context->res_ctx, dc_target);
+
+	/* assign new surfaces */
+	for (i = 0; i < surface_count; i++)
+		target_status->surfaces[i] = surfaces[i];
+
+	target_status->surface_count = surface_count;
+
+	return true;
+}
+
+static bool is_timing_changed(const struct core_stream *cur_stream,
+		const struct core_stream *new_stream)
+{
+	if (cur_stream == NULL)
+		return true;
+
+	/* If the sink pointer changed, this is a hotplug and we should do a
+	 * full hw setting.
+	 */
+	if (cur_stream->sink != new_stream->sink)
+		return true;
+
+	/* If output color space is changed, need to reprogram info frames */
+	if (cur_stream->public.output_color_space !=
+			new_stream->public.output_color_space)
+		return true;
+
+	return memcmp(
+		&cur_stream->public.timing,
+		&new_stream->public.timing,
+		sizeof(struct dc_crtc_timing)) != 0;
+}
+
+static bool are_stream_backends_same(
+	const struct core_stream *stream_a, const struct core_stream *stream_b)
+{
+	if (stream_a == stream_b)
+		return true;
+
+	if (stream_a == NULL || stream_b == NULL)
+		return false;
+
+	if (is_timing_changed(stream_a, stream_b))
+		return false;
+
+	return true;
+}
+
+bool is_target_unchanged(
+	const struct core_target *old_target, const struct core_target *target)
+{
+	int i;
+
+	if (old_target == target)
+		return true;
+	if (old_target->public.stream_count != target->public.stream_count)
+		return false;
+
+	for (i = 0; i < old_target->public.stream_count; i++) {
+		const struct core_stream *old_stream = DC_STREAM_TO_CORE(
+				old_target->public.streams[i]);
+		const struct core_stream *stream = DC_STREAM_TO_CORE(
+				target->public.streams[i]);
+
+		if (!are_stream_backends_same(old_stream, stream))
+			return false;
+	}
+
+	return true;
+}
+
+bool resource_validate_attach_surfaces(
+		const struct dc_validation_set set[],
+		int set_count,
+		const struct validate_context *old_context,
+		struct validate_context *context)
+{
+	int i, j;
+
+	for (i = 0; i < set_count; i++) {
+		for (j = 0; j < old_context->target_count; j++)
+			if (is_target_unchanged(
+					old_context->targets[j],
+					context->targets[i])) {
+				if (!resource_attach_surfaces_to_context(
+						old_context->target_status[j].surfaces,
+						old_context->target_status[j].surface_count,
+						&context->targets[i]->public,
+						context))
+					return false;
+				context->target_status[i] = old_context->target_status[j];
+			}
+		if (set[i].surface_count != 0)
+			if (!resource_attach_surfaces_to_context(
+					set[i].surfaces,
+					set[i].surface_count,
+					&context->targets[i]->public,
+					context))
+				return false;
+
+	}
+
+	return true;
+}
+
+/* Maximum TMDS single link pixel clock 165MHz */
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
+
+static void set_stream_engine_in_use(
+		struct resource_context *res_ctx,
+		struct stream_encoder *stream_enc)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
+		if (res_ctx->pool->stream_enc[i] == stream_enc)
+			res_ctx->is_stream_enc_acquired[i] = true;
+	}
+}
+
+/* TODO: release audio object */
+static void set_audio_in_use(
+		struct resource_context *res_ctx,
+		struct audio *audio)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->audio_count; i++) {
+		if (res_ctx->pool->audios[i] == audio) {
+			res_ctx->is_audio_acquired[i] = true;
+		}
+	}
+}
+
+static int acquire_first_free_pipe(
+		struct resource_context *res_ctx,
+		struct core_stream *stream)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->pipe_count; i++) {
+		if (!res_ctx->pipe_ctx[i].stream) {
+			struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+			pipe_ctx->tg = res_ctx->pool->timing_generators[i];
+			pipe_ctx->mi = res_ctx->pool->mis[i];
+			pipe_ctx->ipp = res_ctx->pool->ipps[i];
+			pipe_ctx->xfm = res_ctx->pool->transforms[i];
+			pipe_ctx->opp = res_ctx->pool->opps[i];
+			pipe_ctx->dis_clk = res_ctx->pool->display_clock;
+			pipe_ctx->pipe_idx = i;
+
+			pipe_ctx->stream = stream;
+			return i;
+		}
+	}
+	return -1;
+}
+
+static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+		struct resource_context *res_ctx,
+		struct core_stream *stream)
+{
+	int i;
+	int j = -1;
+	struct core_link *link = stream->sink->link;
+
+	for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
+		if (!res_ctx->is_stream_enc_acquired[i] &&
+					res_ctx->pool->stream_enc[i]) {
+			/* Store first available for MST second display
+			 * in daisy chain use case */
+			j = i;
+			if (res_ctx->pool->stream_enc[i]->id ==
+					link->link_enc->preferred_engine)
+				return res_ctx->pool->stream_enc[i];
+		}
+	}
+
+	/*
+	 * The above search can fail when the stream encoder is already
+	 * acquired:
+	 * 1) for the second MST display in a daisy chain, the preferred engine
+	 * is already acquired;
+	 * 2) for another link whose preferred engine was already acquired by
+	 * some MST configuration.
+	 *
+	 * If the signal is of DP type and the preferred engine was not found,
+	 * return the last available encoder.
+	 *
+	 * TODO - This is just a patch up and a generic solution is
+	 * required for non DP connectors.
+	 */
+
+	if (j >= 0 && dc_is_dp_signal(stream->signal))
+		return res_ctx->pool->stream_enc[j];
+
+	return NULL;
+}
+
+static struct audio *find_first_free_audio(struct resource_context *res_ctx)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->audio_count; i++) {
+		if (!res_ctx->is_audio_acquired[i])
+			return res_ctx->pool->audios[i];
+	}
+
+	return NULL;
+}
+
+static void update_stream_signal(struct core_stream *stream)
+{
+	const struct dc_sink *dc_sink = stream->public.sink;
+
+	stream->signal = dc_sink->sink_signal;
+	/* For ASICs that support dual-link DVI, adjust the signal type based
+	 * on the timing pixel clock: above 165 MHz the signal is dual link,
+	 * otherwise single link.
+	 */
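+	/* e.g. 1920x1080@60 (148.5 MHz) stays single link, while
+	 * 2560x1600@60 (~268.5 MHz) exceeds 165 MHz and becomes dual link.
+	 */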
+	if (dc_sink->sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+			dc_sink->sink_signal == SIGNAL_TYPE_DVI_DUAL_LINK) {
+		if (stream->public.timing.pix_clk_khz >
+						TMDS_MAX_PIXEL_CLOCK_IN_KHZ)
+			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		else
+			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+}
+
+bool resource_is_stream_unchanged(
+	const struct validate_context *old_context, struct core_stream *stream)
+{
+	int i, j;
+
+	for (i = 0; i < old_context->target_count; i++) {
+		struct core_target *old_target = old_context->targets[i];
+
+		for (j = 0; j < old_target->public.stream_count; j++) {
+			struct core_stream *old_stream =
+				DC_STREAM_TO_CORE(old_target->public.streams[j]);
+
+			if (are_stream_backends_same(old_stream, stream))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static void copy_pipe_ctx(
+	const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
+{
+	struct core_surface *surface = to_pipe_ctx->surface;
+	struct core_stream *stream = to_pipe_ctx->stream;
+
+	*to_pipe_ctx = *from_pipe_ctx;
+	to_pipe_ctx->stream = stream;
+	if (surface != NULL)
+		to_pipe_ctx->surface = surface;
+}
+
+static struct core_stream *find_pll_sharable_stream(
+		const struct core_stream *stream_needs_pll,
+		struct validate_context *context)
+{
+	int i, j;
+
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream_has_pll =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+
+			/* We are looking for a non-DP, non-virtual stream */
+			if (resource_are_streams_timing_synchronizable(
+						stream_needs_pll, stream_has_pll)
+				&& !dc_is_dp_signal(stream_has_pll->signal)
+				&& stream_has_pll->sink->link->public.connector_signal
+							!= SIGNAL_TYPE_VIRTUAL)
+				return stream_has_pll;
+		}
+	}
+
+	return NULL;
+}
+
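+/* Normalize the pixel clock to its 24 bpp equivalent, e.g. a 148500 kHz
+ * stream at COLOR_DEPTH_101010 normalizes to 148500 * 30 / 24 = 185625 kHz.
+ */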
+static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+{
+	uint32_t pix_clk = timing->pix_clk_khz;
+	uint32_t normalized_pix_clk = pix_clk;
+
+	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		pix_clk /= 2;
+
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_888:
+		normalized_pix_clk = pix_clk;
+		break;
+	case COLOR_DEPTH_101010:
+		normalized_pix_clk = (pix_clk * 30) / 24;
+		break;
+	case COLOR_DEPTH_121212:
+		normalized_pix_clk = (pix_clk * 36) / 24;
+		break;
+	case COLOR_DEPTH_161616:
+		normalized_pix_clk = (pix_clk * 48) / 24;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	return normalized_pix_clk;
+}
+
+static void calculate_phy_pix_clks(
+		const struct core_dc *dc,
+		struct validate_context *context)
+{
+	int i, j;
+
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+
+			update_stream_signal(stream);
+
+			/* update actual pixel clock on all streams */
+			if (dc_is_hdmi_signal(stream->signal))
+				stream->phy_pix_clk = get_norm_pix_clk(
+					&stream->public.timing);
+			else
+				stream->phy_pix_clk =
+						stream->public.timing.pix_clk_khz;
+		}
+	}
+}
+
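+/* Two passes: first re-mark the resources of streams that are unchanged from
+ * dc->current_context, then acquire pipes, stream encoders and audio for the
+ * new streams.
+ */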
+enum dc_status resource_map_pool_resources(
+		const struct core_dc *dc,
+		struct validate_context *context)
+{
+	int i, j, k;
+
+	calculate_phy_pix_clks(dc, context);
+
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+
+			if (!resource_is_stream_unchanged(dc->current_context, stream))
+				continue;
+
+			/* mark resources used for stream that is already active */
+			for (k = 0; k < MAX_PIPES; k++) {
+				struct pipe_ctx *pipe_ctx =
+					&context->res_ctx.pipe_ctx[k];
+				const struct pipe_ctx *old_pipe_ctx =
+					&dc->current_context->res_ctx.pipe_ctx[k];
+
+				if (!are_stream_backends_same(old_pipe_ctx->stream, stream))
+					continue;
+
+				pipe_ctx->stream = stream;
+				copy_pipe_ctx(old_pipe_ctx, pipe_ctx);
+
+				set_stream_engine_in_use(
+					&context->res_ctx,
+					pipe_ctx->stream_enc);
+
+				/* Switch to dp clock source only if there is
+				 * no non dp stream that shares the same timing
+				 * with the dp stream.
+				 */
+				if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+					!find_pll_sharable_stream(stream, context))
+					pipe_ctx->clock_source =
+						context->res_ctx.pool->dp_clock_source;
+
+				resource_reference_clock_source(
+					&context->res_ctx,
+					pipe_ctx->clock_source);
+
+				set_audio_in_use(&context->res_ctx,
+					pipe_ctx->audio);
+			}
+		}
+	}
+
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+			struct pipe_ctx *pipe_ctx = NULL;
+			int pipe_idx = -1;
+
+			if (resource_is_stream_unchanged(dc->current_context, stream))
+				continue;
+			/* acquire new resources */
+			pipe_idx = acquire_first_free_pipe(
+						&context->res_ctx, stream);
+			if (pipe_idx < 0)
+				return DC_NO_CONTROLLER_RESOURCE;
+
+			pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+			pipe_ctx->stream_enc =
+				find_first_free_match_stream_enc_for_link(
+					&context->res_ctx, stream);
+
+			if (!pipe_ctx->stream_enc)
+				return DC_NO_STREAM_ENG_RESOURCE;
+
+			set_stream_engine_in_use(
+					&context->res_ctx,
+					pipe_ctx->stream_enc);
+
+			/* TODO: Add check if ASIC support and EDID audio */
+			if (!stream->sink->converter_disable_audio &&
+						dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+						stream->public.audio_info.mode_count) {
+				pipe_ctx->audio = find_first_free_audio(
+						&context->res_ctx);
+
+				/*
+				 * Audio is assigned on a first-come,
+				 * first-served basis; some ASICs have fewer
+				 * audio resources than pipes.
+				 */
+				if (pipe_ctx->audio)
+					set_audio_in_use(
+						&context->res_ctx,
+						pipe_ctx->audio);
+			}
+
+			if (j == 0) {
+				context->target_status[i].primary_otg_inst =
+						pipe_ctx->tg->inst;
+			}
+		}
+	}
+
+	return DC_OK;
+}
+
+/* first target in the context is used to populate the rest */
+void validate_guaranteed_copy_target(
+		struct validate_context *context,
+		int max_targets)
+{
+	int i;
+
+	for (i = 1; i < max_targets; i++) {
+		context->targets[i] = context->targets[0];
+
+		copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
+			      &context->res_ctx.pipe_ctx[i]);
+		context->res_ctx.pipe_ctx[i].stream =
+				context->res_ctx.pipe_ctx[0].stream;
+
+		dc_target_retain(&context->targets[i]->public);
+		context->target_count++;
+	}
+}
+
+static void translate_info_frame(const struct hw_info_frame *hw_info_frame,
+	struct encoder_info_frame *encoder_info_frame)
+{
+	memset(encoder_info_frame, 0, sizeof(struct encoder_info_frame));
+
+	/* For gamut we recalculate the checksum */
+	if (hw_info_frame->gamut_packet.valid) {
+		uint8_t chk_sum = 0;
+		uint8_t *ptr;
+		uint8_t i;
+
+		memmove(&encoder_info_frame->gamut,
+			&hw_info_frame->gamut_packet,
+			sizeof(struct hw_info_packet));
+
+		/* Start of the gamut data. */
+		ptr = &encoder_info_frame->gamut.sb[3];
+
+		for (i = 0; i <= encoder_info_frame->gamut.sb[1]; i++)
+			chk_sum += ptr[i];
+
+		encoder_info_frame->gamut.sb[2] = (uint8_t) (0x100 - chk_sum);
+	}
+
+	if (hw_info_frame->avi_info_packet.valid)
+		memmove(&encoder_info_frame->avi,
+			&hw_info_frame->avi_info_packet,
+			sizeof(struct hw_info_packet));
+
+	if (hw_info_frame->vendor_info_packet.valid)
+		memmove(&encoder_info_frame->vendor,
+			&hw_info_frame->vendor_info_packet,
+			sizeof(struct hw_info_packet));
+
+	if (hw_info_frame->spd_packet.valid)
+		memmove(&encoder_info_frame->spd,
+			&hw_info_frame->spd_packet,
+			sizeof(struct hw_info_packet));
+
+	if (hw_info_frame->vsc_packet.valid)
+		memmove(&encoder_info_frame->vsc,
+			&hw_info_frame->vsc_packet,
+			sizeof(struct hw_info_packet));
+}
+
+static void set_avi_info_frame(
+	struct hw_info_packet *info_packet,
+	struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+	struct info_frame info_frame = { {0} };
+	uint32_t pixel_encoding = 0;
+	enum scanning_type scan_type = SCANNING_TYPE_NODATA;
+	enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
+	bool itc = false;
+	uint8_t cn0_cn1 = 0;
+	uint8_t *check_sum = NULL;
+	uint8_t byte_index = 0;
+
+	if (info_packet == NULL)
+		return;
+
+	color_space = pipe_ctx->stream->public.output_color_space;
+
+	/* Initialize header */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.header.
+			info_frame_type = INFO_FRAME_AVI;
+	/* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
+	 * not be used in HDMI 2.0 (Section 10.1) */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.header.version =
+			INFO_FRAME_VERSION_2;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.header.length =
+			INFO_FRAME_SIZE_AVI;
+
+	/*
+	 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
+	 * according to HDMI 2.0 spec (Section 10.1)
+	 */
+
+	switch (stream->public.timing.pixel_encoding) {
+	case PIXEL_ENCODING_YCBCR422:
+		pixel_encoding = 1;
+		break;
+
+	case PIXEL_ENCODING_YCBCR444:
+		pixel_encoding = 2;
+		break;
+	case PIXEL_ENCODING_YCBCR420:
+		pixel_encoding = 3;
+		break;
+
+	case PIXEL_ENCODING_RGB:
+	default:
+		pixel_encoding = 0;
+	}
+
+	/* Y0_Y1_Y2 : The pixel encoding */
+	/* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.Y0_Y1_Y2 =
+		pixel_encoding;
+
+	/* A0 = 1 Active Format Information valid */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.A0 =
+		ACTIVE_FORMAT_VALID;
+
+	/* B0, B1 = 3; Bar info data is valid */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.B0_B1 =
+		BAR_INFO_BOTH_VALID;
+
+	info_frame.avi_info_packet.info_packet_hdmi.bits.SC0_SC1 =
+			PICTURE_SCALING_UNIFORM;
+
+	/* S0, S1 : Underscan / Overscan */
+	/* TODO: un-hardcode scan type */
+	scan_type = SCANNING_TYPE_UNDERSCAN;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.S0_S1 = scan_type;
+
+	/* C0, C1 : Colorimetry */
+	if (color_space == COLOR_SPACE_YCBCR709)
+		info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
+				COLORIMETRY_ITU709;
+	else if (color_space == COLOR_SPACE_YCBCR601)
+		info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
+				COLORIMETRY_ITU601;
+	else
+		info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
+				COLORIMETRY_NO_DATA;
+
+	/* TODO: un-hardcode aspect ratio */
+	aspect = stream->public.timing.aspect_ratio;
+
+	switch (aspect) {
+	case ASPECT_RATIO_4_3:
+	case ASPECT_RATIO_16_9:
+		info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = aspect;
+		break;
+
+	case ASPECT_RATIO_NO_DATA:
+	case ASPECT_RATIO_64_27:
+	case ASPECT_RATIO_256_135:
+	default:
+		info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = 0;
+	}
+
+	/* Active Format Aspect ratio - same as Picture Aspect Ratio. */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.R0_R3 =
+			ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+
+	/* TODO: un-hardcode cn0_cn1 and itc */
+	cn0_cn1 = 0;
+	itc = false;
+
+	if (itc) {
+		info_frame.avi_info_packet.info_packet_hdmi.bits.ITC = 1;
+		info_frame.avi_info_packet.info_packet_hdmi.bits.CN0_CN1 =
+			cn0_cn1;
+	}
+
+	/* TODO : We should handle YCC quantization */
+	/* but we do not have matrix calculation */
+	if (color_space == COLOR_SPACE_SRGB) {
+		info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
+						RGB_QUANTIZATION_FULL_RANGE;
+		info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
+						YYC_QUANTIZATION_FULL_RANGE;
+	} else if (color_space == COLOR_SPACE_SRGB_LIMITED) {
+		info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
+						RGB_QUANTIZATION_LIMITED_RANGE;
+		info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
+						YYC_QUANTIZATION_LIMITED_RANGE;
+	} else {
+		info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
+						RGB_QUANTIZATION_DEFAULT_RANGE;
+		info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
+						YYC_QUANTIZATION_LIMITED_RANGE;
+	}
+
+	info_frame.avi_info_packet.info_packet_hdmi.bits.VIC0_VIC7 =
+					stream->public.timing.vic;
+
+	/* Pixel repetition
+	 * PR0 - PR3 start from 0, whereas the timing flags' pixel repetition
+	 * starts from 1 */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.PR0_PR3 = 0;
+
+	/* Bar Info
+	 * barTop:    Line Number of End of Top Bar.
+	 * barBottom: Line Number of Start of Bottom Bar.
+	 * barLeft:   Pixel Number of End of Left Bar.
+	 * barRight:  Pixel Number of Start of Right Bar. */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_top =
+			stream->public.timing.v_border_top;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_bottom =
+		(stream->public.timing.v_total
+			- stream->public.timing.v_border_bottom + 1);
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_left =
+			stream->public.timing.h_border_left;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_right =
+		(stream->public.timing.h_total
+			- stream->public.timing.h_border_right + 1);
+
+	/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
+	check_sum =
+		&info_frame.
+		avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+	*check_sum = INFO_FRAME_AVI + INFO_FRAME_SIZE_AVI
+			+ INFO_FRAME_VERSION_2;
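+	/* i.e. the AVI header bytes: type 0x82, length 0x0D, version 0x02,
+	 * assuming these defines carry the standard CEA-861 values.
+	 */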
+
+	for (byte_index = 1; byte_index <= INFO_FRAME_SIZE_AVI; byte_index++)
+		*check_sum += info_frame.avi_info_packet.info_packet_hdmi.
+				packet_raw_data.sb[byte_index];
+
+	/* one byte complement */
+	*check_sum = (uint8_t) (0x100 - *check_sum);
+
+	/* Store in hw_path_mode */
+	info_packet->hb0 =
+		info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb0;
+	info_packet->hb1 =
+		info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb1;
+	info_packet->hb2 =
+		info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb2;
+
+	for (byte_index = 0; byte_index < sizeof(info_packet->sb); byte_index++)
+		info_packet->sb[byte_index] = info_frame.avi_info_packet.
+		info_packet_hdmi.packet_raw_data.sb[byte_index];
+
+	info_packet->valid = true;
+}
+
+static void set_vendor_info_packet(struct core_stream *stream,
+		struct hw_info_packet *info_packet)
+{
+	uint32_t length = 0;
+	bool hdmi_vic_mode = false;
+	uint8_t checksum = 0;
+	uint32_t i = 0;
+	enum dc_timing_3d_format format;
+
+	ASSERT_CRITICAL(stream != NULL);
+	ASSERT_CRITICAL(info_packet != NULL);
+
+	format = stream->public.timing.timing_3d_format;
+
+	/* Can be different depending on packet content */
+	length = 5;
+
+	if (stream->public.timing.hdmi_vic != 0
+			&& stream->public.timing.h_total >= 3840
+			&& stream->public.timing.v_total >= 2160)
+		hdmi_vic_mode = true;
+
+	/* According to HDMI 1.4a CTS, VSIF should be sent
+	 * for both 3D stereo and HDMI VIC modes.
+	 * For all other modes, no VSIF is sent. */
+
+	if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+		return;
+
+	/* 24bit IEEE Registration identifier (0x000c03). LSB first. */
+	info_packet->sb[1] = 0x03;
+	info_packet->sb[2] = 0x0C;
+	info_packet->sb[3] = 0x00;
+
+	/* PB4: 5 lower bits = 0 (reserved). 3 higher bits = HDMI_Video_Format.
+	 * The values for HDMI_Video_Format are:
+	 * 0x0 (0b000) - No additional HDMI video format is presented in this
+	 * packet
+	 * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
+	 * parameter follows
+	 * 0x2 (0b010) - 3D format indication present. 3D_Structure and
+	 * potentially 3D_Ext_Data follows
+	 * 0x3..0x7 (0b011..0b111) - reserved for future use */
+	if (format != TIMING_3D_FORMAT_NONE)
+		info_packet->sb[4] = (2 << 5);
+	else if (hdmi_vic_mode)
+		info_packet->sb[4] = (1 << 5);
+
+	/* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
+	 * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
+	 * The values for 3D_Structure are:
+	 * 0x0 - Frame Packing
+	 * 0x1 - Field Alternative
+	 * 0x2 - Line Alternative
+	 * 0x3 - Side-by-Side (full)
+	 * 0x4 - L + depth
+	 * 0x5 - L + depth + graphics + graphics-depth
+	 * 0x6 - Top-and-Bottom
+	 * 0x7 - Reserved for future use
+	 * 0x8 - Side-by-Side (Half)
+	 * 0x9..0xE - Reserved for future use
+	 * 0xF - Not used */
+	switch (format) {
+	case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+	case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+		info_packet->sb[5] = (0x0 << 4);
+		break;
+
+	case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+	case TIMING_3D_FORMAT_SBS_SW_PACKED:
+		info_packet->sb[5] = (0x8 << 4);
+		length = 6;
+		break;
+
+	case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+	case TIMING_3D_FORMAT_TB_SW_PACKED:
+		info_packet->sb[5] = (0x6 << 4);
+		break;
+
+	default:
+		break;
+	}
+
+	/*PB5: If PB4 is set to 0x1 (extended resolution format)
+	 * fill PB5 with the correct HDMI VIC code */
+	if (hdmi_vic_mode)
+		info_packet->sb[5] = stream->public.timing.hdmi_vic;
+
+	/* Header */
+	info_packet->hb0 = 0x81; /* VSIF packet type. */
+	info_packet->hb1 = 0x01; /* Version */
+
+	/* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
+	info_packet->hb2 = (uint8_t) (length);
+
+	/* Calculate checksum */
+	checksum = 0;
+	checksum += info_packet->hb0;
+	checksum += info_packet->hb1;
+	checksum += info_packet->hb2;
+
+	for (i = 1; i <= length; i++)
+		checksum += info_packet->sb[i];
+
+	info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+	info_packet->valid = true;
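+
+	/* Illustrative packet, assuming HDMI_VIC = 1 (4Kx2K 30 Hz): header
+	 * 81 01 05, payload 49 03 0C 00 20 01, where checksum 0x49 =
+	 * 0x100 - (0x81 + 0x01 + 0x05 + 0x03 + 0x0C + 0x00 + 0x20 + 0x01).
+	 */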
+}
+
+static void set_spd_info_packet(struct core_stream *stream,
+		struct hw_info_packet *info_packet)
+{
+	/* SPD info packet for FreeSync */
+
+	unsigned char checksum = 0;
+	unsigned int idx, payload_size = 0;
+
+	/* Check if Freesync is supported. Return if false. If true,
+	 * set the corresponding bit in the info packet
+	 */
+	if (stream->public.freesync_ctx.supported == false)
+		return;
+
+	if (dc_is_hdmi_signal(stream->signal)) {
+
+		/* HEADER */
+
+		/* HB0  = Packet Type = 0x83 (Source Product
+		 *	  Descriptor InfoFrame)
+		 */
+		info_packet->hb0 = 0x83;
+
+		/* HB1  = Version = 0x01 */
+		info_packet->hb1 = 0x01;
+
+		/* HB2  = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
+		info_packet->hb2 = 0x08;
+
+		payload_size = 0x08;
+
+	} else if (dc_is_dp_signal(stream->signal)) {
+
+		/* HEADER */
+
+		/* HB0  = Secondary-data Packet ID = 0 - Only non-zero
+		 *	  when used to associate audio related info packets
+		 */
+		info_packet->hb0 = 0x00;
+
+		/* HB1  = Packet Type = 0x83 (Source Product
+		 *	  Descriptor InfoFrame)
+		 */
+		info_packet->hb1 = 0x83;
+
+		/* HB2  = [Bits 7:0 = Least significant eight bits -
+		 *	  For INFOFRAME, the value must be 1Bh]
+		 */
+		info_packet->hb2 = 0x1B;
+
+		/* HB3  = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
+		 *	  [Bits 1:0 = Most significant two bits = 0x00]
+		 */
+		info_packet->hb3 = 0x04;
+
+		payload_size = 0x1B;
+	}
+
+	/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+	info_packet->sb[1] = 0x1A;
+
+	/* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+	info_packet->sb[2] = 0x00;
+
+	/* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+	info_packet->sb[3] = 0x00;
+
+	/* PB4 = Reserved */
+	info_packet->sb[4] = 0x00;
+
+	/* PB5 = Reserved */
+	info_packet->sb[5] = 0x00;
+
+	/* PB6 = [Bits 7:3 = Reserved] */
+	info_packet->sb[6] = 0x00;
+
+	if (stream->public.freesync_ctx.supported == true)
+		/* PB6 = [Bit 0 = FreeSync Supported] */
+		info_packet->sb[6] |= 0x01;
+
+	if (stream->public.freesync_ctx.enabled == true)
+		/* PB6 = [Bit 1 = FreeSync Enabled] */
+		info_packet->sb[6] |= 0x02;
+
+	if (stream->public.freesync_ctx.active == true)
+		/* PB6 = [Bit 2 = FreeSync Active] */
+		info_packet->sb[6] |= 0x04;
+
+	/* PB7 = FreeSync Minimum refresh rate (Hz) */
+	info_packet->sb[7] = (unsigned char) (stream->public.freesync_ctx.
+			min_refresh_in_micro_hz / 1000000);
+
+	/* PB8 = FreeSync Maximum refresh rate (Hz)
+	 *
+	 * Note: We do not use the maximum capable refresh rate
+	 * of the panel, because we should never go above the field
+	 * rate of the mode timing set.
+	 */
+	info_packet->sb[8] = (unsigned char) (stream->public.freesync_ctx.
+			nominal_refresh_in_micro_hz / 1000000);
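+
+	/* Illustrative values: a 40-144 Hz FreeSync range on a 144 Hz mode
+	 * yields sb[7] = 40 (0x28) and sb[8] = 144 (0x90).
+	 */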
+
+	/* PB9 - PB27  = Reserved */
+	for (idx = 9; idx <= 27; idx++)
+		info_packet->sb[idx] = 0x00;
+
+	/* Calculate checksum */
+	checksum += info_packet->hb0;
+	checksum += info_packet->hb1;
+	checksum += info_packet->hb2;
+	checksum += info_packet->hb3;
+
+	for (idx = 1; idx <= payload_size; idx++)
+		checksum += info_packet->sb[idx];
+
+	/* PB0 = Checksum (one byte complement) */
+	info_packet->sb[0] = (unsigned char) (0x100 - checksum);
+
+	info_packet->valid = true;
+}
+
+static void set_vsc_info_packet(struct core_stream *stream,
+		struct hw_info_packet *info_packet)
+{
+	unsigned int vsc_packet_revision = 0;
+	unsigned int i;
+
+	if (stream->sink->link->public.psr_caps.psr_version != 0)
+		vsc_packet_revision = 2;
+
+	/* VSC packet not needed based on the features
+	 * supported by this DP display
+	 */
+	if (vsc_packet_revision == 0)
+		return;
+
+	if (vsc_packet_revision == 0x2) {
+		/* Secondary-data Packet ID = 0*/
+		info_packet->hb0 = 0x00;
+		/* 07h - Packet Type Value indicating Video
+		 * Stream Configuration packet
+		 */
+		info_packet->hb1 = 0x07;
+		/* 02h = VSC SDP supporting 3D stereo and PSR
+		 * (applies to eDP v1.3 or higher).
+		 */
+		info_packet->hb2 = 0x02;
+		/* 08h = VSC packet supporting 3D stereo + PSR
+		 * (HB2 = 02h).
+		 */
+		info_packet->hb3 = 0x08;
+
+		for (i = 0; i < 28; i++)
+			info_packet->sb[i] = 0;
+
+		info_packet->valid = true;
+	}
+
+	/*TODO: stereo 3D support and extend pixel encoding colorimetry*/
+}
+
+void resource_validate_ctx_destruct(struct validate_context *context)
+{
+	int i, j;
+
+	for (i = 0; i < context->target_count; i++) {
+		for (j = 0; j < context->target_status[i].surface_count; j++)
+			dc_surface_release(
+				context->target_status[i].surfaces[j]);
+
+		context->target_status[i].surface_count = 0;
+		dc_target_release(&context->targets[i]->public);
+	}
+}
+
+/*
+ * Copy src_ctx into dst_ctx and retain all surfaces and targets referenced
+ * by the src_ctx
+ */
+void resource_validate_ctx_copy_construct(
+		const struct validate_context *src_ctx,
+		struct validate_context *dst_ctx)
+{
+	int i, j;
+
+	*dst_ctx = *src_ctx;
+
+	for (i = 0; i < dst_ctx->res_ctx.pool->pipe_count; i++) {
+		struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
+
+		if (cur_pipe->top_pipe)
+			cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+		if (cur_pipe->bottom_pipe)
+			cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+	}
+
+	for (i = 0; i < dst_ctx->target_count; i++) {
+		dc_target_retain(&dst_ctx->targets[i]->public);
+		for (j = 0; j < dst_ctx->target_status[i].surface_count; j++)
+			dc_surface_retain(
+				dst_ctx->target_status[i].surfaces[j]);
+	}
+}
+
+struct clock_source *dc_resource_find_first_free_pll(
+		struct resource_context *res_ctx)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool->clk_src_count; ++i) {
+		if (res_ctx->clock_source_ref_count[i] == 0)
+			return res_ctx->pool->clock_sources[i];
+	}
+
+	return NULL;
+}
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
+{
+	enum signal_type signal = SIGNAL_TYPE_NONE;
+	struct hw_info_frame info_frame = { { 0 } };
+
+	/* default all packets to invalid */
+	info_frame.avi_info_packet.valid = false;
+	info_frame.gamut_packet.valid = false;
+	info_frame.vendor_info_packet.valid = false;
+	info_frame.spd_packet.valid = false;
+	info_frame.vsc_packet.valid = false;
+
+	signal = pipe_ctx->stream->signal;
+
+	/* HDMI and DP have different info packets */
+	if (dc_is_hdmi_signal(signal)) {
+		set_avi_info_frame(
+			&info_frame.avi_info_packet, pipe_ctx);
+		set_vendor_info_packet(
+			pipe_ctx->stream, &info_frame.vendor_info_packet);
+		set_spd_info_packet(pipe_ctx->stream, &info_frame.spd_packet);
+	} else if (dc_is_dp_signal(signal)) {
+		set_vsc_info_packet(pipe_ctx->stream, &info_frame.vsc_packet);
+		set_spd_info_packet(pipe_ctx->stream, &info_frame.spd_packet);
+	}
+
+	translate_info_frame(&info_frame,
+			&pipe_ctx->encoder_info_frame);
+}
+
+enum dc_status resource_map_clock_resources(
+		const struct core_dc *dc,
+		struct validate_context *context)
+{
+	int i, j, k;
+
+	/* acquire new resources */
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+
+			if (resource_is_stream_unchanged(dc->current_context, stream))
+				continue;
+
+			for (k = 0; k < MAX_PIPES; k++) {
+				struct pipe_ctx *pipe_ctx =
+					&context->res_ctx.pipe_ctx[k];
+
+				if (context->res_ctx.pipe_ctx[k].stream != stream)
+					continue;
+
+				if (dc_is_dp_signal(pipe_ctx->stream->signal)
+					|| pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+					pipe_ctx->clock_source =
+						context->res_ctx.pool->dp_clock_source;
+				else {
+					pipe_ctx->clock_source = NULL;
+
+					if (!dc->public.config.disable_disp_pll_sharing)
+						pipe_ctx->clock_source =
+							resource_find_used_clk_src_for_sharing(
+								&context->res_ctx,
+								pipe_ctx);
+
+					if (pipe_ctx->clock_source == NULL)
+						pipe_ctx->clock_source =
+							dc_resource_find_first_free_pll(&context->res_ctx);
+				}
+
+				if (pipe_ctx->clock_source == NULL)
+					return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+				resource_reference_clock_source(
+						&context->res_ctx,
+						pipe_ctx->clock_source);
+
+				/* only one cs per stream regardless of mpo */
+				break;
+			}
+		}
+	}
+
+	return DC_OK;
+}
+
+/*
+ * Note: We need to disable output if clock sources change,
+ * since bios does optimization and doesn't apply if changing
+ * PHY when not already disabled.
+ */
+bool pipe_need_reprogram(
+		struct pipe_ctx *pipe_ctx_old,
+		struct pipe_ctx *pipe_ctx)
+{
+	if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
+		return true;
+
+	if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
+		return true;
+
+	if (pipe_ctx_old->audio != pipe_ctx->audio)
+		return true;
+
+	if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
+			&& pipe_ctx_old->stream != pipe_ctx->stream)
+		return true;
+
+	if (pipe_ctx_old->stream_enc != pipe_ctx->stream_enc)
+		return true;
+
+	if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+		return true;
+
+	return false;
+}

+ 113 - 0
drivers/gpu/drm/amd/display/dc/core/dc_sink.c

@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+struct sink {
+	struct core_sink protected;
+	int ref_count;
+};
+
+#define DC_SINK_TO_SINK(dc_sink) \
+			container_of(dc_sink, struct sink, protected.public)
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+static void destruct(struct sink *sink)
+{
+
+}
+
+static bool construct(struct sink *sink, const struct dc_sink_init_data *init_params)
+{
+	struct core_link *core_link = DC_LINK_TO_LINK(init_params->link);
+
+	sink->protected.public.sink_signal = init_params->sink_signal;
+	sink->protected.link = core_link;
+	sink->protected.ctx = core_link->ctx;
+	sink->protected.dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+	sink->protected.converter_disable_audio =
+			init_params->converter_disable_audio;
+
+	return true;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+void dc_sink_retain(const struct dc_sink *dc_sink)
+{
+	struct sink *sink = DC_SINK_TO_SINK(dc_sink);
+
+	++sink->ref_count;
+}
+
+void dc_sink_release(const struct dc_sink *dc_sink)
+{
+	struct sink *sink = DC_SINK_TO_SINK(dc_sink);
+
+	--sink->ref_count;
+
+	if (sink->ref_count == 0) {
+		destruct(sink);
+		dm_free(sink);
+	}
+}
+
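+/* dc_sink_create() returns a sink holding one reference (dm_alloc() is
+ * assumed to zero-initialize); callers pair dc_sink_retain() with
+ * dc_sink_release(), and the sink is destroyed once the count reaches zero.
+ */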
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
+{
+	struct sink *sink = dm_alloc(sizeof(*sink));
+
+	if (NULL == sink)
+		goto alloc_fail;
+
+	if (false == construct(sink, init_params))
+		goto construct_fail;
+
+	/* TODO should we move this outside to where the assignment actually happens? */
+	dc_sink_retain(&sink->protected.public);
+
+	return &sink->protected.public;
+
+construct_fail:
+	dm_free(sink);
+
+alloc_fail:
+	return NULL;
+}
+
+/*******************************************************************************
+ * Protected functions - visible only inside of DC (not visible in DM)
+ ******************************************************************************/

+ 141 - 0
drivers/gpu/drm/amd/display/dc/core/dc_stream.c

@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "resource.h"
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+struct stream {
+	struct core_stream protected;
+	int ref_count;
+};
+
+#define DC_STREAM_TO_STREAM(dc_stream) container_of(dc_stream, struct stream, protected.public)
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+static bool construct(struct core_stream *stream,
+	const struct dc_sink *dc_sink_data)
+{
+	uint32_t i = 0;
+
+	stream->sink = DC_SINK_TO_CORE(dc_sink_data);
+	stream->ctx = stream->sink->ctx;
+	stream->public.sink = dc_sink_data;
+
+	dc_sink_retain(dc_sink_data);
+
+	/* Copy audio modes */
+	/* TODO - Remove this translation */
+	for (i = 0; i < dc_sink_data->edid_caps.audio_mode_count; i++) {
+		stream->public.audio_info.modes[i].channel_count =
+			dc_sink_data->edid_caps.audio_modes[i].channel_count;
+		stream->public.audio_info.modes[i].format_code =
+			dc_sink_data->edid_caps.audio_modes[i].format_code;
+		stream->public.audio_info.modes[i].sample_rates.all =
+			dc_sink_data->edid_caps.audio_modes[i].sample_rate;
+		stream->public.audio_info.modes[i].sample_size =
+			dc_sink_data->edid_caps.audio_modes[i].sample_size;
+	}
+	stream->public.audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
+	stream->public.audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
+	stream->public.audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
+	memmove(
+		stream->public.audio_info.display_name,
+		dc_sink_data->edid_caps.display_name,
+		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+	stream->public.audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
+	stream->public.audio_info.product_id = dc_sink_data->edid_caps.product_id;
+	stream->public.audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
+
+	/* TODO - Unhardcode port_id */
+	stream->public.audio_info.port_id[0] = 0x5558859e;
+	stream->public.audio_info.port_id[1] = 0xd989449;
+
+	/* EDID CAP translation for HDMI 2.0 */
+	stream->public.timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+
+	stream->status.link = &stream->sink->link->public;
+
+	return true;
+}
+
+static void destruct(struct core_stream *stream)
+{
+	dc_sink_release(&stream->sink->public);
+}
+
+void dc_stream_retain(const struct dc_stream *dc_stream)
+{
+	struct stream *stream = DC_STREAM_TO_STREAM(dc_stream);
+	stream->ref_count++;
+}
+
+void dc_stream_release(const struct dc_stream *public)
+{
+	if (public != NULL) {
+		struct stream *stream = DC_STREAM_TO_STREAM(public);
+		struct core_stream *protected = DC_STREAM_TO_CORE(public);
+
+		stream->ref_count--;
+
+		if (stream->ref_count == 0) {
+			destruct(protected);
+			dm_free(stream);
+		}
+	}
+}
+
+struct dc_stream *dc_create_stream_for_sink(
+		const struct dc_sink *dc_sink)
+{
+	struct core_sink *sink = DC_SINK_TO_CORE(dc_sink);
+	struct stream *stream;
+
+	if (sink == NULL)
+		goto alloc_fail;
+
+	stream = dm_alloc(sizeof(struct stream));
+
+	if (NULL == stream)
+		goto alloc_fail;
+
+	if (false == construct(&stream->protected, dc_sink))
+			goto construct_fail;
+
+	dc_stream_retain(&stream->protected.public);
+
+	return &stream->protected.public;
+
+construct_fail:
+	dm_free(stream);
+
+alloc_fail:
+	return NULL;
+}
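
A hedged usage sketch of the create/retain/release lifecycle implemented above, as a hypothetical DM-side caller would exercise it (the helper name is illustrative, not part of the patch):

	static struct dc_stream *dm_example_stream_for_sink(
			const struct dc_sink *sink)
	{
		/* The returned stream already holds one reference and has
		 * retained the sink; NULL on allocation failure. */
		struct dc_stream *stream = dc_create_stream_for_sink(sink);

		if (stream == NULL)
			return NULL;

		dc_stream_retain(stream);	/* a second owner takes a ref */
		dc_stream_release(stream);	/* ...and drops it when done  */

		/* The final dc_stream_release() runs destruct(), which drops
		 * the sink reference taken in construct(), then frees the
		 * wrapper. */
		return stream;
	}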

+ 213 - 0
drivers/gpu/drm/amd/display/dc/core/dc_surface.c

@@ -0,0 +1,213 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* DC interface (public) */
+#include "dm_services.h"
+#include "dc.h"
+
+/* DC core (private) */
+#include "core_dc.h"
+#include "transform.h"
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+struct surface {
+	struct core_surface protected;
+	enum dc_irq_source irq_source;
+	int ref_count;
+};
+
+struct gamma {
+	struct core_gamma protected;
+	int ref_count;
+};
+
+#define DC_SURFACE_TO_SURFACE(dc_surface) container_of(dc_surface, struct surface, protected.public)
+#define CORE_SURFACE_TO_SURFACE(core_surface) container_of(core_surface, struct surface, protected)
+
+#define DC_GAMMA_TO_GAMMA(dc_gamma) \
+	container_of(dc_gamma, struct gamma, protected.public)
+#define CORE_GAMMA_TO_GAMMA(core_gamma) \
+	container_of(core_gamma, struct gamma, protected)
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static bool construct(struct dc_context *ctx, struct surface *surface)
+{
+	surface->protected.ctx = ctx;
+	return true;
+}
+
+static void destruct(struct surface *surface)
+{
+
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void enable_surface_flip_reporting(struct dc_surface *dc_surface,
+		uint32_t controller_id)
+{
+	struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
+	surface->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
+	/*register_flip_interrupt(surface);*/
+}
+
+struct dc_surface *dc_create_surface(const struct dc *dc)
+{
+	struct core_dc *core_dc = DC_TO_CORE(dc);
+
+	struct surface *surface = dm_alloc(sizeof(*surface));
+
+	if (NULL == surface)
+		goto alloc_fail;
+
+	if (false == construct(core_dc->ctx, surface))
+		goto construct_fail;
+
+	dc_surface_retain(&surface->protected.public);
+
+	return &surface->protected.public;
+
+construct_fail:
+	dm_free(surface);
+
+alloc_fail:
+	return NULL;
+}
+
+const struct dc_surface_status *dc_surface_get_status(
+		const struct dc_surface *dc_surface)
+{
+	struct dc_surface_status *surface_status;
+	struct core_surface *core_surface;
+	struct core_dc *core_dc;
+	int i;
+
+	if (dc_surface == NULL)
+		return NULL;
+
+	core_surface = DC_SURFACE_TO_CORE(dc_surface);
+
+	if (core_surface == NULL || core_surface->ctx == NULL)
+		return NULL;
+
+	surface_status = &core_surface->status;
+
+	if (core_surface->ctx->dc == NULL)
+		return NULL;
+
+	core_dc = DC_TO_CORE(core_surface->ctx->dc);
+
+	if (core_dc->current_context == NULL)
+		return NULL;
+
+	for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count;
+			i++) {
+		struct pipe_ctx *pipe_ctx =
+				&core_dc->current_context->res_ctx.pipe_ctx[i];
+
+		if (pipe_ctx->surface !=
+				DC_SURFACE_TO_CORE(dc_surface))
+			continue;
+
+		core_dc->hwss.update_pending_status(pipe_ctx);
+	}
+
+	return surface_status;
+}
+
+void dc_surface_retain(const struct dc_surface *dc_surface)
+{
+	struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
+
+	++surface->ref_count;
+}
+
+void dc_surface_release(const struct dc_surface *dc_surface)
+{
+	struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
+
+	--surface->ref_count;
+
+	if (surface->ref_count == 0) {
+		destruct(surface);
+		dm_free(surface);
+	}
+}
+
+static bool construct_gamma(struct gamma *gamma)
+{
+	return true;
+}
+
+static void destruct_gamma(struct gamma *gamma)
+{
+
+}
+
+void dc_gamma_retain(const struct dc_gamma *dc_gamma)
+{
+	struct gamma *gamma = DC_GAMMA_TO_GAMMA(dc_gamma);
+
+	++gamma->ref_count;
+}
+
+void dc_gamma_release(const struct dc_gamma *dc_gamma)
+{
+	struct gamma *gamma = DC_GAMMA_TO_GAMMA(dc_gamma);
+	--gamma->ref_count;
+
+	if (gamma->ref_count == 0) {
+		destruct_gamma(gamma);
+		dm_free(gamma);
+	}
+}
+
+struct dc_gamma *dc_create_gamma(void)
+{
+	struct gamma *gamma = dm_alloc(sizeof(*gamma));
+
+	if (gamma == NULL)
+		goto alloc_fail;
+
+	if (false == construct_gamma(gamma))
+		goto construct_fail;
+
+	dc_gamma_retain(&gamma->protected.public);
+
+	return &gamma->protected.public;
+
+construct_fail:
+	dm_free(gamma);
+
+alloc_fail:
+	return NULL;
+}
+

+ 334 - 0
drivers/gpu/drm/amd/display/dc/core/dc_target.c

@@ -0,0 +1,334 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "ipp.h"
+#include "timing_generator.h"
+
+struct target {
+	struct core_target protected;
+	int ref_count;
+};
+
+#define DC_TARGET_TO_TARGET(dc_target) \
+	container_of(dc_target, struct target, protected.public)
+#define CORE_TARGET_TO_TARGET(core_target) \
+	container_of(core_target, struct target, protected)
+
+static void construct(
+	struct core_target *target,
+	struct dc_context *ctx,
+	struct dc_stream *dc_streams[],
+	uint8_t stream_count)
+{
+	uint8_t i;
+	for (i = 0; i < stream_count; i++) {
+		target->public.streams[i] = dc_streams[i];
+		dc_stream_retain(dc_streams[i]);
+	}
+
+	target->ctx = ctx;
+	target->public.stream_count = stream_count;
+}
+
+static void destruct(struct core_target *core_target)
+{
+	int i;
+
+	for (i = 0; i < core_target->public.stream_count; i++) {
+		dc_stream_release(
+			(struct dc_stream *)core_target->public.streams[i]);
+		core_target->public.streams[i] = NULL;
+	}
+}
+
+void dc_target_retain(const struct dc_target *dc_target)
+{
+	struct target *target = DC_TARGET_TO_TARGET(dc_target);
+
+	target->ref_count++;
+}
+
+void dc_target_release(const struct dc_target *dc_target)
+{
+	struct target *target = DC_TARGET_TO_TARGET(dc_target);
+	struct core_target *protected = DC_TARGET_TO_CORE(dc_target);
+
+	ASSERT(target->ref_count > 0);
+	target->ref_count--;
+	if (target->ref_count == 0) {
+		destruct(protected);
+		dm_free(target);
+	}
+}
+
+const struct dc_target_status *dc_target_get_status(
+					const struct dc_target* dc_target)
+{
+	uint8_t i;
+	struct core_target* target = DC_TARGET_TO_CORE(dc_target);
+	struct core_dc *dc = DC_TO_CORE(target->ctx->dc);
+
+	for (i = 0; i < dc->current_context->target_count; i++)
+		if (target == dc->current_context->targets[i])
+			return &dc->current_context->target_status[i];
+
+	return NULL;
+}
+
+struct dc_target *dc_create_target_for_streams(
+		struct dc_stream *dc_streams[],
+		uint8_t stream_count)
+{
+	struct core_stream *stream;
+	struct target *target;
+
+	if (0 == stream_count)
+		goto target_alloc_fail;
+
+	stream = DC_STREAM_TO_CORE(dc_streams[0]);
+
+	target = dm_alloc(sizeof(struct target));
+
+	if (NULL == target)
+		goto target_alloc_fail;
+
+	construct(&target->protected, stream->ctx, dc_streams, stream_count);
+
+	dc_target_retain(&target->protected.public);
+
+	return &target->protected.public;
+
+target_alloc_fail:
+	return NULL;
+}
+
+bool dc_target_is_connected_to_sink(
+		const struct dc_target * dc_target,
+		const struct dc_sink *dc_sink)
+{
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+	uint8_t i;
+	for (i = 0; i < target->public.stream_count; i++) {
+		if (target->public.streams[i]->sink == dc_sink)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address
+ */
+bool dc_target_set_cursor_attributes(
+	struct dc_target *dc_target,
+	const struct dc_cursor_attributes *attributes)
+{
+	uint8_t i, j;
+	struct core_target *target;
+	struct core_dc *core_dc;
+	struct resource_context *res_ctx;
+
+	if (NULL == dc_target) {
+		dm_error("DC: dc_target is NULL!\n");
+		return false;
+	}
+	if (NULL == attributes) {
+		dm_error("DC: attributes is NULL!\n");
+		return false;
+	}
+
+	target = DC_TARGET_TO_CORE(dc_target);
+	core_dc = DC_TO_CORE(target->ctx->dc);
+	res_ctx = &core_dc->current_context->res_ctx;
+
+	for (i = 0; i < target->public.stream_count; i++) {
+		for (j = 0; j < MAX_PIPES; j++) {
+			struct input_pixel_processor *ipp =
+						res_ctx->pipe_ctx[j].ipp;
+
+			if (res_ctx->pipe_ctx[j].stream !=
+				DC_STREAM_TO_CORE(target->public.streams[i]))
+				continue;
+
+			/* As of this writing the cursor is on the top
+			 * plane, so we only need to set it on the first
+			 * pipe we find. May need to make this code
+			 * DCE-specific later.
+			 */
+			if (ipp->funcs->ipp_cursor_set_attributes(
+							ipp, attributes))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+bool dc_target_set_cursor_position(
+	struct dc_target *dc_target,
+	const struct dc_cursor_position *position)
+{
+	uint8_t i, j;
+	struct core_target *target;
+	struct core_dc *core_dc;
+	struct resource_context *res_ctx;
+
+	if (NULL == dc_target) {
+		dm_error("DC: dc_target is NULL!\n");
+		return false;
+	}
+
+	if (NULL == position) {
+		dm_error("DC: cursor position is NULL!\n");
+		return false;
+	}
+
+	target = DC_TARGET_TO_CORE(dc_target);
+	core_dc = DC_TO_CORE(target->ctx->dc);
+	res_ctx = &core_dc->current_context->res_ctx;
+
+	for (i = 0; i < target->public.stream_count; i++) {
+		for (j = 0; j < MAX_PIPES; j++) {
+			struct input_pixel_processor *ipp =
+						res_ctx->pipe_ctx[j].ipp;
+
+			if (res_ctx->pipe_ctx[j].stream !=
+				DC_STREAM_TO_CORE(target->public.streams[i]))
+				continue;
+
+			/* As of this writing the cursor is on the top
+			 * plane, so we only need to set it on the first
+			 * pipe we find. May need to make this code
+			 * DCE-specific later.
+			 */
+			ipp->funcs->ipp_cursor_set_position(ipp, position);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target)
+{
+	uint8_t i, j;
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+	struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc);
+	struct resource_context *res_ctx =
+		&core_dc->current_context->res_ctx;
+
+	for (i = 0; i < target->public.stream_count; i++) {
+		for (j = 0; j < MAX_PIPES; j++) {
+			struct timing_generator *tg = res_ctx->pipe_ctx[j].tg;
+
+			if (res_ctx->pipe_ctx[j].stream !=
+				DC_STREAM_TO_CORE(target->public.streams[i]))
+				continue;
+
+			return tg->funcs->get_frame_count(tg);
+		}
+	}
+
+	return 0;
+}
+
+uint32_t dc_target_get_scanoutpos(
+		const struct dc_target *dc_target,
+		uint32_t *vbl,
+		uint32_t *position)
+{
+	uint8_t i, j;
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+	struct core_dc *core_dc = DC_TO_CORE(target->ctx->dc);
+	struct resource_context *res_ctx =
+		&core_dc->current_context->res_ctx;
+
+	for (i = 0; i < target->public.stream_count; i++) {
+		for (j = 0; j < MAX_PIPES; j++) {
+			struct timing_generator *tg = res_ctx->pipe_ctx[j].tg;
+
+			if (res_ctx->pipe_ctx[j].stream !=
+				DC_STREAM_TO_CORE(target->public.streams[i]))
+				continue;
+
+			return tg->funcs->get_scanoutpos(tg, vbl, position);
+		}
+	}
+
+	return 0;
+}
+
+void dc_target_log(
+	const struct dc_target *dc_target,
+	struct dal_logger *dm_logger,
+	enum dc_log_type log_type)
+{
+	int i;
+
+	const struct core_target *core_target =
+			CONST_DC_TARGET_TO_CORE(dc_target);
+
+	dm_logger_write(dm_logger,
+			log_type,
+			"core_target %p: stream_count=%d\n",
+			core_target,
+			core_target->public.stream_count);
+
+	for (i = 0; i < core_target->public.stream_count; i++) {
+		const struct core_stream *core_stream =
+			DC_STREAM_TO_CORE(core_target->public.streams[i]);
+
+		dm_logger_write(dm_logger,
+			log_type,
+			"core_stream %p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d;\n",
+			core_stream,
+			core_stream->public.src.x,
+			core_stream->public.src.y,
+			core_stream->public.src.width,
+			core_stream->public.src.height,
+			core_stream->public.dst.x,
+			core_stream->public.dst.y,
+			core_stream->public.dst.width,
+			core_stream->public.dst.height);
+		dm_logger_write(dm_logger,
+			log_type,
+			"\tpix_clk_khz: %d, h_total: %d, v_total: %d\n",
+			core_stream->public.timing.pix_clk_khz,
+			core_stream->public.timing.h_total,
+			core_stream->public.timing.v_total);
+		dm_logger_write(dm_logger,
+			log_type,
+			"\tsink name: %s, serial: %d\n",
+			core_stream->sink->public.edid_caps.display_name,
+			core_stream->sink->public.edid_caps.serial_number);
+		dm_logger_write(dm_logger,
+			log_type,
+			"\tlink: %d\n",
+			core_stream->sink->link->public.link_index);
+	}
+}

+ 780 - 0
drivers/gpu/drm/amd/display/dc/dc.h

@@ -0,0 +1,780 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INTERFACE_H_
+#define DC_INTERFACE_H_
+
+#include "dc_types.h"
+#include "dpcd_defs.h"
+#include "grph_object_defs.h"
+#include "logger_types.h"
+#include "gpio_types.h"
+#include "link_service_types.h"
+
+#define MAX_TARGETS 6
+#define MAX_SURFACES 6
+#define MAX_SINKS_PER_LINK 4
+
+/*******************************************************************************
+ * Display Core Interfaces
+ ******************************************************************************/
+
+struct dc_caps {
+	uint32_t max_targets;
+	uint32_t max_links;
+	uint32_t max_audios;
+	uint32_t max_slave_planes;
+	uint32_t max_downscale_ratio;
+	uint32_t i2c_speed_in_khz;
+};
+
+
+struct dc_dcc_surface_param {
+	enum surface_pixel_format format;
+	struct dc_size surface_size;
+	enum dc_scan_direction scan;
+};
+
+struct dc_dcc_setting {
+	unsigned int max_compressed_blk_size;
+	unsigned int max_uncompressed_blk_size;
+	bool independent_64b_blks;
+};
+
+struct dc_surface_dcc_cap {
+	bool capable;
+	bool const_color_support;
+
+	union {
+		struct {
+			struct dc_dcc_setting rgb;
+		} grph;
+
+		struct {
+			struct dc_dcc_setting luma;
+			struct dc_dcc_setting chroma;
+		} video;
+	};
+};
+
+/* Forward declarations */
+struct dc;
+struct dc_surface;
+struct validate_context;
+
+struct dc_cap_funcs {
+	int i;
+};
+
+struct dc_stream_funcs {
+	bool (*adjust_vmin_vmax)(struct dc *dc,
+			const struct dc_stream **stream,
+			int num_streams,
+			int vmin,
+			int vmax);
+
+	void (*stream_update_scaling)(const struct dc *dc,
+			const struct dc_stream *dc_stream,
+			const struct rect *src,
+			const struct rect *dst);
+	bool (*set_gamut_remap)(struct dc *dc,
+			const struct dc_stream **stream, int num_streams);
+	bool (*set_backlight)(struct dc *dc, unsigned int backlight_level,
+		unsigned int frame_ramp, const struct dc_stream *stream);
+	bool (*init_dmcu_backlight_settings)(struct dc *dc);
+	bool (*set_abm_level)(struct dc *dc, unsigned int abm_level);
+	bool (*set_psr_enable)(struct dc *dc, bool enable);
+	bool (*setup_psr)(struct dc *dc, const struct dc_stream *stream);
+};
+
+struct link_training_settings;
+
+struct dc_link_funcs {
+	void (*set_drive_settings)(struct dc *dc,
+			struct link_training_settings *lt_settings);
+	void (*perform_link_training)(struct dc *dc,
+			struct dc_link_settings *link_setting,
+			bool skip_video_pattern);
+	void (*set_preferred_link_settings)(struct dc *dc,
+			struct dc_link_settings *link_setting);
+	void (*enable_hpd)(const struct dc_link *link);
+	void (*disable_hpd)(const struct dc_link *link);
+	void (*set_test_pattern)(
+			const struct dc_link *link,
+			enum dp_test_pattern test_pattern,
+			const struct link_training_settings *p_link_settings,
+			const unsigned char *p_custom_pattern,
+			unsigned int cust_pattern_size);
+};
+
+/* Structure to hold configuration flags set by dm at dc creation. */
+struct dc_config {
+	bool gpu_vm_support;
+	bool disable_disp_pll_sharing;
+};
+
+struct dc_debug {
+	bool surface_visual_confirm;
+	bool max_disp_clk;
+	bool target_trace;
+	bool surface_trace;
+	bool validation_trace;
+	bool disable_stutter;
+	bool disable_dcc;
+	bool disable_dfs_bypass;
+	bool disable_power_gate;
+	bool disable_clock_gate;
+};
+
+struct dc {
+	struct dc_caps caps;
+	struct dc_cap_funcs cap_funcs;
+	struct dc_stream_funcs stream_funcs;
+	struct dc_link_funcs link_funcs;
+	struct dc_config config;
+	struct dc_debug debug;
+};
+
+enum frame_buffer_mode {
+	FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
+	FRAME_BUFFER_MODE_ZFB_ONLY,
+	FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
+};
+
+struct dchub_init_data {
+	bool dchub_initialzied;
+	bool dchub_info_valid;
+	int64_t zfb_phys_addr_base;
+	int64_t zfb_mc_base_addr;
+	uint64_t zfb_size_in_byte;
+	enum frame_buffer_mode fb_mode;
+};
+
+struct dc_init_data {
+	struct hw_asic_id asic_id;
+	void *driver; /* ctx */
+	struct cgs_device *cgs_device;
+
+	int num_virtual_links;
+	/*
+	 * If 'vbios_override' is not NULL, it will be used instead
+	 * of the real VBIOS. Intended use is Diagnostics on FPGA.
+	 */
+	struct dc_bios *vbios_override;
+	enum dce_environment dce_environment;
+
+	struct dc_config flags;
+};
+
+struct dc *dc_create(const struct dc_init_data *init_params);
+
+void dc_destroy(struct dc **dc);
+
+bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data);
+
+/*******************************************************************************
+ * Surface Interfaces
+ ******************************************************************************/
+
+enum {
+	RGB_256X3X16 = 256,
+	FLOAT_GAMMA_RAMP_MAX = 1025
+};
+
+enum dc_gamma_ramp_type {
+	GAMMA_RAMP_RBG256X3X16,
+	GAMMA_RAMP_FLOAT,
+};
+
+struct float_rgb {
+	struct fixed32_32 red;
+	struct fixed32_32 green;
+	struct fixed32_32 blue;
+};
+
+struct dc_gamma_ramp_float {
+	struct float_rgb scale;
+	struct float_rgb offset;
+	struct float_rgb gamma_curve[FLOAT_GAMMA_RAMP_MAX];
+};
+
+struct dc_gamma_ramp_rgb256x3x16 {
+	uint16_t red[RGB_256X3X16];
+	uint16_t green[RGB_256X3X16];
+	uint16_t blue[RGB_256X3X16];
+};
+
+struct dc_gamma {
+	enum dc_gamma_ramp_type type;
+	union {
+		struct dc_gamma_ramp_rgb256x3x16 gamma_ramp_rgb256x3x16;
+		struct dc_gamma_ramp_float gamma_ramp_float;
+	};
+	uint32_t size;
+};
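
A hedged sketch of filling the 256x3x16 ramp with an identity curve, using dc_create_gamma() declared further below (the helper name and the linear curve are illustrative, not part of the patch):

	static struct dc_gamma *dm_example_linear_gamma(void)
	{
		struct dc_gamma *gamma = dc_create_gamma();
		int i;

		if (gamma == NULL)
			return NULL;

		gamma->type = GAMMA_RAMP_RBG256X3X16;
		gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16);

		/* Identity ramp: evenly spaced 16-bit values per channel. */
		for (i = 0; i < RGB_256X3X16; i++) {
			uint16_t v = (uint16_t)((i * 0xFFFF) / (RGB_256X3X16 - 1));

			gamma->gamma_ramp_rgb256x3x16.red[i] = v;
			gamma->gamma_ramp_rgb256x3x16.green[i] = v;
			gamma->gamma_ramp_rgb256x3x16.blue[i] = v;
		}

		return gamma;
	}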
+
+struct dc_surface {
+	bool visible;
+	bool flip_immediate;
+	struct dc_plane_address address;
+
+	struct scaling_taps scaling_quality;
+	struct rect src_rect;
+	struct rect dst_rect;
+	struct rect clip_rect;
+
+	union plane_size plane_size;
+	union dc_tiling_info tiling_info;
+	struct dc_plane_dcc_param dcc;
+	enum dc_color_space color_space;
+
+	enum surface_pixel_format format;
+	enum dc_rotation_angle rotation;
+	bool horizontal_mirror;
+	enum plane_stereo_format stereo_format;
+
+	const struct dc_gamma *gamma_correction;
+};
+
+struct dc_plane_info {
+	union plane_size plane_size;
+	union dc_tiling_info tiling_info;
+	enum surface_pixel_format format;
+	enum dc_rotation_angle rotation;
+	bool horizontal_mirror;
+	enum plane_stereo_format stereo_format;
+	enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
+	bool visible;
+};
+
+struct dc_scaling_info {
+		struct rect src_rect;
+		struct rect dst_rect;
+		struct rect clip_rect;
+		struct scaling_taps scaling_quality;
+};
+
+struct dc_surface_update {
+	const struct dc_surface *surface;
+
+	/* isr safe update parameters.  null means no updates */
+	struct dc_flip_addrs *flip_addr;
+	struct dc_plane_info *plane_info;
+	struct dc_scaling_info *scaling_info;
+	/* following updates require alloc/sleep/spin that is not isr safe,
+	 * null means no updates
+	 */
+	struct dc_gamma *gamma;
+
+
+};
+/*
+ * This structure is filled in by dc_surface_get_status and contains
+ * the last requested address and the currently active address, so the
+ * caller can determine if there are any outstanding flips.
+ */
+struct dc_surface_status {
+	struct dc_plane_address requested_address;
+	struct dc_plane_address current_address;
+	bool is_flip_pending;
+};
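
A hedged sketch of how a DM could use this status to wait out a pending flip (the busy-wait loop is illustrative only; a real caller would sleep or use the pflip interrupt):

	static void dm_example_wait_for_flip(const struct dc_surface *surface)
	{
		const struct dc_surface_status *status;

		do {
			status = dc_surface_get_status(surface);
		} while (status != NULL && status->is_flip_pending);
	}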
+
+/*
+ * Create a new surface with default parameters.
+ */
+struct dc_surface *dc_create_surface(const struct dc *dc);
+const struct dc_surface_status *dc_surface_get_status(
+		const struct dc_surface *dc_surface);
+
+void dc_surface_retain(const struct dc_surface *dc_surface);
+void dc_surface_release(const struct dc_surface *dc_surface);
+
+void dc_gamma_release(const struct dc_gamma *dc_gamma);
+struct dc_gamma *dc_create_gamma(void);
+
+/*
+ * This structure holds a surface address.  There could be multiple addresses
+ * in cases such as Stereo 3D, Planar YUV, etc.  Other per-flip attributes such
+ * as frame durations and DCC format can also be set.
+ */
+struct dc_flip_addrs {
+	struct dc_plane_address address;
+	bool flip_immediate;
+	/* TODO: DCC format info */
+	/* TODO: add flip duration for FreeSync */
+};
+
+/*
+ * Optimized flip address update function.
+ *
+ * After this call:
+ *   Surface addresses and flip attributes are programmed.
+ *   Surface flips occur at the next configured time (h_sync or v_sync flip).
+ */
+void dc_flip_surface_addrs(struct dc *dc,
+		const struct dc_surface *const surfaces[],
+		struct dc_flip_addrs flip_addrs[],
+		uint32_t count);
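
A hedged sketch of a single-surface page flip through this interface ('new_addr' is a hypothetical, already-pinned framebuffer address; the helper name is illustrative):

	static void dm_example_flip(struct dc *dc,
			const struct dc_surface *surface,
			struct dc_plane_address new_addr)
	{
		struct dc_flip_addrs flip = {
			.address = new_addr,
			.flip_immediate = false,	/* flip on v_sync */
		};
		const struct dc_surface *const surfaces[1] = { surface };

		dc_flip_surface_addrs(dc, surfaces, &flip, 1);
	}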
+
+/*
+ * Set up surface attributes and associate to a target
+ * The surfaces parameter is the absolute set of all surfaces active for the target.
+ * If no surfaces are provided, the target will be blanked; no memory read.
+ * Any flip related attribute changes must be done through this interface.
+ *
+ * After this call:
+ *   Surfaces attributes are programmed and configured to be composed into target.
+ *   This does not trigger a flip.  No surface address is programmed.
+ */
+
+bool dc_commit_surfaces_to_target(
+		struct dc *dc,
+		const struct dc_surface **dc_surfaces,
+		uint8_t surface_count,
+		struct dc_target *dc_target);
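
Since the surface list is absolute, committing an empty set is how a target gets blanked; a hedged one-line sketch:

	static bool dm_example_blank_target(struct dc *dc,
			struct dc_target *target)
	{
		/* No surfaces: target scans out blank, no memory reads. */
		return dc_commit_surfaces_to_target(dc, NULL, 0, target);
	}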
+
+bool dc_pre_update_surfaces_to_target(
+		struct dc *dc,
+		const struct dc_surface *const *new_surfaces,
+		uint8_t new_surface_count,
+		struct dc_target *dc_target);
+
+bool dc_post_update_surfaces_to_target(
+		struct dc *dc);
+
+void dc_update_surfaces_for_target(struct dc *dc, struct dc_surface_update *updates,
+		int surface_count, struct dc_target *dc_target);
+
+/*******************************************************************************
+ * Target Interfaces
+ ******************************************************************************/
+#define MAX_STREAM_NUM 1
+
+struct dc_target {
+	uint8_t stream_count;
+	const struct dc_stream *streams[MAX_STREAM_NUM];
+};
+
+/*
+ * Target status is returned from dc_target_get_status in order to get
+ * the IRQ source, current frame counter and currently attached surfaces.
+ */
+struct dc_target_status {
+	int primary_otg_inst;
+	int cur_frame_count;
+	int surface_count;
+	const struct dc_surface *surfaces[MAX_SURFACE_NUM];
+};
+
+struct dc_target *dc_create_target_for_streams(
+		struct dc_stream *dc_streams[],
+		uint8_t stream_count);
+
+/*
+ * Get the current target status.
+ */
+const struct dc_target_status *dc_target_get_status(
+					const struct dc_target* dc_target);
+
+void dc_target_retain(const struct dc_target *dc_target);
+void dc_target_release(const struct dc_target *dc_target);
+void dc_target_log(
+	const struct dc_target *dc_target,
+	struct dal_logger *dc_logger,
+	enum dc_log_type log_type);
+
+uint8_t dc_get_current_target_count(const struct dc *dc);
+struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i);
+
+bool dc_target_is_connected_to_sink(
+		const struct dc_target *dc_target,
+		const struct dc_sink *dc_sink);
+
+uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target);
+
+/* TODO: Return parsed values rather than direct register read
+ * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
+ * being refactored properly to be dce-specific
+ */
+uint32_t dc_target_get_scanoutpos(
+		const struct dc_target *dc_target,
+		uint32_t *vbl,
+		uint32_t *position);
+
+/*
+ * Structure to store surface/target associations for validation
+ */
+struct dc_validation_set {
+	const struct dc_target *target;
+	const struct dc_surface *surfaces[MAX_SURFACES];
+	uint8_t surface_count;
+};
+
+/*
+ * This function takes a set of resources and checks that they are cofunctional.
+ *
+ * After this call:
+ *   No hardware is programmed by this call.  Only validation is done.
+ */
+bool dc_validate_resources(
+		const struct dc *dc,
+		const struct dc_validation_set set[],
+		uint8_t set_count);
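
A hedged sketch of validating one target/surface association before any programming (single-element set; the helper name is illustrative):

	static bool dm_example_validate(const struct dc *dc,
			const struct dc_target *target,
			const struct dc_surface *surface)
	{
		struct dc_validation_set set = {
			.target = target,
			.surfaces = { surface },
			.surface_count = 1,
		};

		return dc_validate_resources(dc, &set, 1);
	}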
+
+/*
+ * This function takes a target and checks if it is guaranteed to be supported.
+ * Guaranteed means that MAX_COFUNC*target is supported.
+ *
+ * After this call:
+ *   No hardware is programmed by this call.  Only validation is done.
+ */
+
+bool dc_validate_guaranteed(
+		const struct dc *dc,
+		const struct dc_target *dc_target);
+
+/*
+ * Set up streams and links associated to targets to drive sinks
+ * The targets parameter is an absolute set of all active targets.
+ *
+ * After this call:
+ *   Phy, Encoder, Timing Generator are programmed and enabled.
+ *   New targets are enabled with blank stream; no memory read.
+ */
+bool dc_commit_targets(
+		struct dc *dc,
+		struct dc_target *targets[],
+		uint8_t target_count);
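
A hedged sketch of a minimal mode set built from these pieces: wrap one stream in a target, then commit it as the absolute set of active targets (whether DC takes its own reference on committed targets is an assumption here; verify before copying this pattern):

	static bool dm_example_modeset(struct dc *dc, struct dc_stream *stream)
	{
		struct dc_target *target =
			dc_create_target_for_streams(&stream, 1);
		bool ret;

		if (target == NULL)
			return false;

		ret = dc_commit_targets(dc, &target, 1);

		/* Assumed: DC keeps its own reference to committed targets,
		 * so the creation reference can be dropped here. */
		dc_target_release(target);
		return ret;
	}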
+
+/*******************************************************************************
+ * Stream Interfaces
+ ******************************************************************************/
+struct dc_stream {
+	const struct dc_sink *sink;
+	struct dc_crtc_timing timing;
+
+	enum dc_color_space output_color_space;
+
+	struct rect src; /* viewport in target space*/
+	struct rect dst; /* stream addressable area */
+
+	struct audio_info audio_info;
+
+	bool ignore_msa_timing_param;
+
+	struct freesync_context freesync_ctx;
+
+	/* TODO: dithering */
+	/* TODO: transfer function (CSC/regamma/gamut remap) */
+	struct colorspace_transform gamut_remap_matrix;
+	struct csc_transform csc_color_matrix;
+	/* TODO: custom INFO packets */
+	/* TODO: ABM info (DMCU) */
+	/* TODO: PSR info */
+	/* TODO: CEA VIC */
+};
+
+/**
+ * Create a new default stream for the requested sink
+ */
+struct dc_stream *dc_create_stream_for_sink(const struct dc_sink *dc_sink);
+
+void dc_stream_retain(const struct dc_stream *dc_stream);
+void dc_stream_release(const struct dc_stream *dc_stream);
+
+struct dc_stream_status {
+	/*
+	 * link this stream passes through
+	 */
+	const struct dc_link *link;
+};
+
+const struct dc_stream_status *dc_stream_get_status(
+	const struct dc_stream *dc_stream);
+
+/*******************************************************************************
+ * Link Interfaces
+ ******************************************************************************/
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+	const struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+	unsigned int sink_count;
+	const struct dc_sink *local_sink;
+	unsigned int link_index;
+	enum dc_connection_type type;
+	enum signal_type connector_signal;
+	enum dc_irq_source irq_source_hpd;
+	enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse  */
+	/* caps is the same as reported_link_cap. link training uses
+	 * reported_link_cap. TODO: clean up.
+	 */
+	struct dc_link_settings reported_link_cap;
+	struct dc_link_settings verified_link_cap;
+	struct dc_link_settings max_link_setting;
+	struct dc_link_settings cur_link_settings;
+	struct dc_lane_settings cur_lane_setting;
+
+	uint8_t ddc_hw_inst;
+	uint8_t link_enc_hw_inst;
+
+	struct psr_caps psr_caps;
+	bool test_pattern_enabled;
+	union compliance_test_state compliance_test_state;
+};
+
+struct dpcd_caps {
+	union dpcd_rev dpcd_rev;
+	union max_lane_count max_ln_count;
+	union max_down_spread max_down_spread;
+
+	/* dongle type (DP converter, CV smart dongle) */
+	enum display_dongle_type dongle_type;
+	/* Dongle's downstream count. */
+	union sink_count sink_count;
+	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+	 * indicates 'Frame Sequential-to-Frame Pack' conversion capability.
+	 */
+	bool is_dp_hdmi_s3d_converter;
+
+	bool allow_invalid_MSA_timing_param;
+	bool panel_mode_edp;
+	uint32_t sink_dev_id;
+	uint32_t branch_dev_id;
+	int8_t branch_dev_name[6];
+	int8_t branch_hw_revision;
+};
+
+struct dc_link_status {
+	struct dpcd_caps *dpcd_caps;
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link.  dc_link order is constant and determined at
+ * boot time.  They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index);
+
+/* Return id of physical connector represented by a dc_link at link_index.*/
+const struct graphics_object_id dc_get_link_id_at_index(
+		struct dc *dc, uint32_t link_index);
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+		uint32_t frame_ramp, const struct dc_stream *stream);
+
+bool dc_link_init_dmcu_backlight_settings(const struct dc_link *dc_link);
+
+bool dc_link_set_abm_level(const struct dc_link *dc_link, uint32_t level);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable);
+
+bool dc_link_setup_psr(const struct dc_link *dc_link,
+		const struct dc_stream *stream);
+
+/* Request DC to detect whether a panel is connected.
+ * boot - true if this call happens during initial boot.
+ * Returns false for any type of detection failure, or for MST detection;
+ * true otherwise. True means further action is required (status update
+ * and OS notification).
+ */
+bool dc_link_detect(const struct dc_link *dc_link, bool boot);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(const struct dc_link *dc_link);
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+		const struct dc_link *dc_link,
+		const uint8_t *edid,
+		int len,
+		struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+	const struct dc_link *link,
+	const struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink);
+
+void dc_link_dp_set_drive_settings(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings);
+
+bool dc_link_dp_perform_link_training(
+	struct dc_link *link,
+	const struct dc_link_settings *link_setting,
+	bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+	const struct dc_link *link,
+	enum dp_test_pattern test_pattern,
+	const struct link_training_settings *p_link_settings,
+	const unsigned char *p_custom_pattern,
+	unsigned int cust_pattern_size);
+
+/*******************************************************************************
+ * Sink Interfaces - A sink corresponds to a display output device
+ ******************************************************************************/
+
+/*
+ * The sink structure contains EDID and other display device properties
+ */
+struct dc_sink {
+	enum signal_type sink_signal;
+	struct dc_edid dc_edid; /* raw edid */
+	struct dc_edid_caps edid_caps; /* parsed display caps */
+};
+
+void dc_sink_retain(const struct dc_sink *sink);
+void dc_sink_release(const struct dc_sink *sink);
+
+const struct audio **dc_get_audios(struct dc *dc);
+
+struct dc_sink_init_data {
+	enum signal_type sink_signal;
+	const struct dc_link *link;
+	uint32_t dongle_max_pix_clk;
+	bool converter_disable_audio;
+};
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
+
+/*******************************************************************************
+ * Cursor interfaces - To manage the cursor within a target
+ ******************************************************************************/
+/* TODO: Deprecated once we switch to dc_set_cursor_position */
+bool dc_target_set_cursor_attributes(
+	struct dc_target *dc_target,
+	const struct dc_cursor_attributes *attributes);
+
+bool dc_target_set_cursor_position(
+	struct dc_target *dc_target,
+	const struct dc_cursor_position *position);
+
+/* Newer interfaces  */
+struct dc_cursor {
+	struct dc_plane_address address;
+	struct dc_cursor_attributes attributes;
+};
+
+/*
+ * Create a new cursor with default values for a given target.
+ */
+struct dc_cursor *dc_create_cursor_for_target(
+		const struct dc *dc,
+		struct dc_target *dc_target);
+
+/**
+ * Commit cursor attribute changes such as pixel format, dimensions and
+ * surface address.
+ *
+ * After this call:
+ *   Cursor address and format is programmed to the new values.
+ *   Cursor position is unmodified.
+ */
+bool dc_commit_cursor(
+		const struct dc *dc,
+		struct dc_cursor *cursor);
+
+/*
+ * Optimized cursor position update
+ *
+ * After this call:
+ *   Cursor position will be programmed as well as enable/disable bit.
+ */
+bool dc_set_cursor_position(
+		const struct dc *dc,
+		struct dc_cursor *cursor,
+		struct dc_cursor_position *pos);
+
+/*******************************************************************************
+ * Interrupt interfaces
+ ******************************************************************************/
+enum dc_irq_source dc_interrupt_to_irq_source(
+		struct dc *dc,
+		uint32_t src_id,
+		uint32_t ext_id);
+void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable);
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
+enum dc_irq_source dc_get_hpd_irq_source_at_index(
+		struct dc *dc, uint32_t link_index);
+
+/*******************************************************************************
+ * Power Interfaces
+ ******************************************************************************/
+
+void dc_set_power_state(
+		struct dc *dc,
+		enum dc_acpi_cm_power_state power_state,
+		enum dc_video_power_state video_power_state);
+void dc_resume(const struct dc *dc);
+
+/*******************************************************************************
+ * DDC Interfaces
+ ******************************************************************************/
+
+const struct ddc_service *dc_get_ddc_at_index(
+		struct dc *dc, uint32_t link_index);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_read_dpcd(
+		struct dc *dc,
+		uint32_t link_index,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size);
+
+bool dc_write_dpcd(
+		struct dc *dc,
+		uint32_t link_index,
+		uint32_t address,
+		const uint8_t *data,
+		uint32_t size);
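
A hedged sketch: reading the 16-byte DPCD receiver-capability block (DPCD address 0x0) through the accessor above (the helper name is illustrative):

	static bool dm_example_read_dpcd_caps(struct dc *dc, uint32_t link_index)
	{
		uint8_t caps[16];

		return dc_read_dpcd(dc, link_index, 0x0, caps, sizeof(caps));
	}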
+
+bool dc_submit_i2c(
+		struct dc *dc,
+		uint32_t link_index,
+		struct i2c_command *cmd);
+
+#endif /* DC_INTERFACE_H_ */

+ 224 - 0
drivers/gpu/drm/amd/display/dc/dc_bios_types.h

@@ -0,0 +1,224 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_BIOS_TYPES_H
+#define DC_BIOS_TYPES_H
+
+/******************************************************************************
+ * Interface file for VBIOS implementations.
+ *
+ * The default implementation is inside DC.
+ * Display Manager (which instantiates DC) has the option to supply its own
+ * (external to DC) implementation of VBIOS, which will be called by DC, using
+ * this interface.
+ * (The intended use is Diagnostics, but other uses may appear.)
+ *****************************************************************************/
+
+#include "include/bios_parser_types.h"
+
+struct dc_vbios_funcs {
+	uint8_t (*get_connectors_number)(struct dc_bios *bios);
+
+	struct graphics_object_id (*get_encoder_id)(
+		struct dc_bios *bios,
+		uint32_t i);
+	struct graphics_object_id (*get_connector_id)(
+		struct dc_bios *bios,
+		uint8_t connector_index);
+	uint32_t (*get_dst_number)(
+		struct dc_bios *bios,
+		struct graphics_object_id id);
+
+	uint32_t (*get_gpio_record)(
+		struct dc_bios *dcb,
+		struct graphics_object_id id,
+		struct bp_gpio_cntl_info *gpio_record,
+		uint32_t record_size);
+
+	enum bp_result (*get_src_obj)(
+		struct dc_bios *bios,
+		struct graphics_object_id object_id, uint32_t index,
+		struct graphics_object_id *src_object_id);
+	enum bp_result (*get_dst_obj)(
+		struct dc_bios *bios,
+		struct graphics_object_id object_id, uint32_t index,
+		struct graphics_object_id *dest_object_id);
+
+	enum bp_result (*get_i2c_info)(
+		struct dc_bios *dcb,
+		struct graphics_object_id id,
+		struct graphics_object_i2c_info *info);
+
+	enum bp_result (*get_voltage_ddc_info)(
+		struct dc_bios *bios,
+		uint32_t index,
+		struct graphics_object_i2c_info *info);
+	enum bp_result (*get_thermal_ddc_info)(
+		struct dc_bios *bios,
+		uint32_t i2c_channel_id,
+		struct graphics_object_i2c_info *info);
+	enum bp_result (*get_hpd_info)(
+		struct dc_bios *bios,
+		struct graphics_object_id id,
+		struct graphics_object_hpd_info *info);
+	enum bp_result (*get_device_tag)(
+		struct dc_bios *bios,
+		struct graphics_object_id connector_object_id,
+		uint32_t device_tag_index,
+		struct connector_device_tag_info *info);
+	enum bp_result (*get_firmware_info)(
+		struct dc_bios *bios,
+		struct firmware_info *info);
+	enum bp_result (*get_spread_spectrum_info)(
+		struct dc_bios *bios,
+		enum as_signal_type signal,
+		uint32_t index,
+		struct spread_spectrum_info *ss_info);
+	uint32_t (*get_ss_entry_number)(
+		struct dc_bios *bios,
+		enum as_signal_type signal);
+	enum bp_result (*get_embedded_panel_info)(
+		struct dc_bios *bios,
+		struct embedded_panel_info *info);
+	enum bp_result (*get_gpio_pin_info)(
+		struct dc_bios *bios,
+		uint32_t gpio_id,
+		struct gpio_pin_info *info);
+	enum bp_result (*get_encoder_cap_info)(
+		struct dc_bios *bios,
+		struct graphics_object_id object_id,
+		struct bp_encoder_cap_info *info);
+
+	bool (*is_lid_status_changed)(
+		struct dc_bios *bios);
+	bool (*is_display_config_changed)(
+		struct dc_bios *bios);
+	bool (*is_accelerated_mode)(
+		struct dc_bios *bios);
+	void (*get_bios_event_info)(
+		struct dc_bios *bios,
+		struct bios_event_info *info);
+	void (*update_requested_backlight_level)(
+		struct dc_bios *bios,
+		uint32_t backlight_8bit);
+	uint32_t (*get_requested_backlight_level)(
+		struct dc_bios *bios);
+	void (*take_backlight_control)(
+		struct dc_bios *bios,
+		bool cntl);
+
+	bool (*is_active_display)(
+		struct dc_bios *bios,
+		enum signal_type signal,
+		const struct connector_device_tag_info *device_tag);
+	enum controller_id (*get_embedded_display_controller_id)(
+		struct dc_bios *bios);
+	uint32_t (*get_embedded_display_refresh_rate)(
+		struct dc_bios *bios);
+
+	void (*set_scratch_critical_state)(
+		struct dc_bios *bios,
+		bool state);
+	bool (*is_device_id_supported)(
+		struct dc_bios *bios,
+		struct device_id id);
+
+	/* COMMANDS */
+
+	enum bp_result (*encoder_control)(
+		struct dc_bios *bios,
+		struct bp_encoder_control *cntl);
+	enum bp_result (*transmitter_control)(
+		struct dc_bios *bios,
+		struct bp_transmitter_control *cntl);
+	enum bp_result (*crt_control)(
+		struct dc_bios *bios,
+		enum engine_id engine_id,
+		bool enable,
+		uint32_t pixel_clock);
+	enum bp_result (*enable_crtc)(
+		struct dc_bios *bios,
+		enum controller_id id,
+		bool enable);
+	enum bp_result (*adjust_pixel_clock)(
+		struct dc_bios *bios,
+		struct bp_adjust_pixel_clock_parameters *bp_params);
+	enum bp_result (*set_pixel_clock)(
+		struct dc_bios *bios,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*set_dce_clock)(
+		struct dc_bios *bios,
+		struct bp_set_dce_clock_parameters *bp_params);
+	unsigned int (*get_smu_clock_info)(
+		struct dc_bios *bios);
+	enum bp_result (*enable_spread_spectrum_on_ppll)(
+		struct dc_bios *bios,
+		struct bp_spread_spectrum_parameters *bp_params,
+		bool enable);
+	enum bp_result (*program_crtc_timing)(
+		struct dc_bios *bios,
+		struct bp_hw_crtc_timing_parameters *bp_params);
+
+	enum bp_result (*crtc_source_select)(
+		struct dc_bios *bios,
+		struct bp_crtc_source_select *bp_params);
+	enum bp_result (*program_display_engine_pll)(
+		struct dc_bios *bios,
+		struct bp_pixel_clock_parameters *bp_params);
+
+	enum signal_type (*dac_load_detect)(
+		struct dc_bios *bios,
+		struct graphics_object_id encoder,
+		struct graphics_object_id connector,
+		enum signal_type display_signal);
+
+	enum bp_result (*enable_disp_power_gating)(
+		struct dc_bios *bios,
+		enum controller_id controller_id,
+		enum bp_pipe_control_action action);
+
+	void (*post_init)(struct dc_bios *bios);
+
+	void (*bios_parser_destroy)(struct dc_bios **dcb);
+};
+
+struct bios_registers {
+	uint32_t BIOS_SCRATCH_6;
+};
+
+struct dc_bios {
+	const struct dc_vbios_funcs *funcs;
+
+	uint8_t *bios;
+	uint32_t bios_size;
+
+	uint8_t *bios_local_image;
+
+	struct dc_context *ctx;
+	const struct bios_registers *regs;
+	struct integrated_info *integrated_info;
+};
+
+#endif /* DC_BIOS_TYPES_H */

+ 115 - 0
drivers/gpu/drm/amd/display/dc/dc_ddc_types.h

@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_DDC_TYPES_H_
+#define DC_DDC_TYPES_H_
+
+struct i2c_payload {
+	bool write;
+	uint8_t address;
+	uint32_t length;
+	uint8_t *data;
+};
+
+enum i2c_command_engine {
+	I2C_COMMAND_ENGINE_DEFAULT,
+	I2C_COMMAND_ENGINE_SW,
+	I2C_COMMAND_ENGINE_HW
+};
+
+struct i2c_command {
+	struct i2c_payload *payloads;
+	uint8_t number_of_payloads;
+
+	enum i2c_command_engine engine;
+
+	/* expressed in kHz;
+	 * zero means "use default value" */
+	uint32_t speed;
+};
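
A hedged sketch of a standard DDC EDID-block read expressed as an i2c_command, submitted through dc_submit_i2c() from dc.h (two payloads: an offset write, then a 128-byte read; the 0x50 slave address follows DDC convention; the helper name is illustrative):

	static bool example_read_edid_block(struct dc *dc, uint32_t link_index,
			uint8_t buf[128])
	{
		uint8_t offset = 0;
		struct i2c_payload payloads[2] = {
			{ .write = true, .address = 0x50, .length = 1,
				.data = &offset },
			{ .write = false, .address = 0x50, .length = 128,
				.data = buf },
		};
		struct i2c_command cmd = {
			.payloads = payloads,
			.number_of_payloads = 2,
			.engine = I2C_COMMAND_ENGINE_DEFAULT,
			.speed = 0,	/* 0 = use default speed */
		};

		return dc_submit_i2c(dc, link_index, &cmd);
	}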
+
+struct gpio_ddc_hw_info {
+	bool hw_supported;
+	uint32_t ddc_channel;
+};
+
+struct ddc {
+	struct gpio *pin_data;
+	struct gpio *pin_clock;
+	struct gpio_ddc_hw_info hw_info;
+	struct dc_context *ctx;
+};
+
+union ddc_wa {
+	struct {
+		uint32_t DP_SKIP_POWER_OFF:1;
+		uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
+	} bits;
+	uint32_t raw;
+};
+
+struct ddc_flags {
+	uint8_t EDID_QUERY_DONE_ONCE:1;
+	uint8_t IS_INTERNAL_DISPLAY:1;
+	uint8_t FORCE_READ_REPEATED_START:1;
+	uint8_t EDID_STRESS_READ:1;
+
+};
+
+enum ddc_transaction_type {
+	DDC_TRANSACTION_TYPE_NONE = 0,
+	DDC_TRANSACTION_TYPE_I2C,
+	DDC_TRANSACTION_TYPE_I2C_OVER_AUX,
+	DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER,
+	DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER
+};
+
+enum display_dongle_type {
+	DISPLAY_DONGLE_NONE = 0,
+	/* Active converter types*/
+	DISPLAY_DONGLE_DP_VGA_CONVERTER,
+	DISPLAY_DONGLE_DP_DVI_CONVERTER,
+	DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+	/* DP-HDMI/DVI passive dongles (Type 1 and Type 2)*/
+	DISPLAY_DONGLE_DP_DVI_DONGLE,
+	DISPLAY_DONGLE_DP_HDMI_DONGLE,
+	/* Other types of dongle*/
+	DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
+};
+
+struct ddc_service {
+	struct ddc *ddc_pin;
+	struct ddc_flags flags;
+	union ddc_wa wa;
+	enum ddc_transaction_type transaction_type;
+	enum display_dongle_type dongle_type;
+	struct dc_context *ctx;
+	struct core_link *link;
+
+	uint32_t address;
+	uint32_t edid_buf_len;
+	uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+};
+
+#endif /* DC_DDC_TYPES_H_ */

+ 105 - 0
drivers/gpu/drm/amd/display/dc/dc_dp_types.h

@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_DP_TYPES_H
+#define DC_DP_TYPES_H
+
+enum dc_lane_count {
+	LANE_COUNT_UNKNOWN = 0,
+	LANE_COUNT_ONE = 1,
+	LANE_COUNT_TWO = 2,
+	LANE_COUNT_FOUR = 4,
+	LANE_COUNT_EIGHT = 8,
+	LANE_COUNT_DP_MAX = LANE_COUNT_FOUR
+};
+
+/* This is actually a reference clock (27MHz) multiplier:
+ * 162MBps bandwidth for the 1.62GHz link rate,
+ * 270MBps for 2.70GHz,
+ * 324MBps for 3.24GHz,
+ * 540MBps for 5.40GHz,
+ * 810MBps for 8.10GHz
+ */
+enum dc_link_rate {
+	LINK_RATE_UNKNOWN = 0,
+	LINK_RATE_LOW = 0x06,
+	LINK_RATE_HIGH = 0x0A,
+	LINK_RATE_RBR2 = 0x0C,
+	LINK_RATE_HIGH2 = 0x14,
+	LINK_RATE_HIGH3 = 0x1E
+};
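
A hedged helper making the multiplier explicit: the enum value times the 27MHz reference gives the link symbol clock (e.g. LINK_RATE_LOW: 0x06 * 27MHz = 162MHz, which carries 1.62Gbps at 10 bits per symbol). Illustration only, not part of the patch:

	static inline uint32_t example_link_rate_to_khz(enum dc_link_rate rate)
	{
		return (uint32_t)rate * 27000;	/* 27MHz reference, in kHz */
	}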
+
+enum dc_link_spread {
+	LINK_SPREAD_DISABLED = 0x00,
+	/* 0.5 % downspread 30 kHz */
+	LINK_SPREAD_05_DOWNSPREAD_30KHZ = 0x10,
+	/* 0.5 % downspread 33 kHz */
+	LINK_SPREAD_05_DOWNSPREAD_33KHZ = 0x11
+};
+
+enum dc_voltage_swing {
+	VOLTAGE_SWING_LEVEL0 = 0,	/* direct HW translation! */
+	VOLTAGE_SWING_LEVEL1,
+	VOLTAGE_SWING_LEVEL2,
+	VOLTAGE_SWING_LEVEL3,
+	VOLTAGE_SWING_MAX_LEVEL = VOLTAGE_SWING_LEVEL3
+};
+
+enum dc_pre_emphasis {
+	PRE_EMPHASIS_DISABLED = 0,	/* direct HW translation! */
+	PRE_EMPHASIS_LEVEL1,
+	PRE_EMPHASIS_LEVEL2,
+	PRE_EMPHASIS_LEVEL3,
+	PRE_EMPHASIS_MAX_LEVEL = PRE_EMPHASIS_LEVEL3
+};
+/* Post Cursor 2 is optional for transmitter
+ * and it applies only to the main link operating at HBR2
+ */
+enum dc_post_cursor2 {
+	POST_CURSOR2_DISABLED = 0,	/* direct HW translation! */
+	POST_CURSOR2_LEVEL1,
+	POST_CURSOR2_LEVEL2,
+	POST_CURSOR2_LEVEL3,
+	POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
+};
+
+struct dc_link_settings {
+	enum dc_lane_count lane_count;
+	enum dc_link_rate link_rate;
+	enum dc_link_spread link_spread;
+};
+
+struct dc_lane_settings {
+	enum dc_voltage_swing VOLTAGE_SWING;
+	enum dc_pre_emphasis PRE_EMPHASIS;
+	enum dc_post_cursor2 POST_CURSOR2;
+};
+
+struct dc_link_training_settings {
+	struct dc_link_settings link;
+	struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+};
+
+#endif /* DC_DP_TYPES_H */

+ 144 - 0
drivers/gpu/drm/amd/display/dc/dc_helper.c

@@ -0,0 +1,144 @@
+/*
+ * dc_helper.c
+ *
+ *  Created on: Aug 30, 2016
+ *      Author: agrodzov
+ */
+#include "dm_services.h"
+#include <stdarg.h>
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+		uint32_t addr, uint32_t reg_val, int n,
+		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+		...)
+{
+	uint32_t shift, mask, field_value;
+	int i = 1;
+
+	va_list ap;
+	va_start(ap, field_value1);
+
+	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+	while (i < n) {
+		shift = va_arg(ap, uint32_t);
+		mask = va_arg(ap, uint32_t);
+		field_value = va_arg(ap, uint32_t);
+
+		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+		i++;
+	}
+
+	dm_write_reg(ctx, addr, reg_val);
+	va_end(ap);
+
+	return reg_val;
+}
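
A hedged usage sketch: one read-modify-write updating two fields of a hypothetical register (the address, masks and shifts are placeholders, not real register definitions):

	static void example_update_two_fields(const struct dc_context *ctx)
	{
		uint32_t addr = 0x1234;	/* hypothetical register */
		uint32_t reg_val = dm_read_reg(ctx, addr);

		/* n = 2 fields total: one fixed triple, one vararg triple. */
		generic_reg_update_ex(ctx, addr, reg_val, 2,
			0, 0x0000000F, 3,	/* shift, mask, value */
			8, 0x0000FF00, 0x42);	/* shift, mask, value */
	}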
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift, uint32_t mask, uint32_t *field_value)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
+	return reg_val;
+}
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	return reg_val;
+}
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	return reg_val;
+}
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	return reg_val;
+}
+
+/* Note: a va_arg version of this is a pretty bad idea: since the output
+ * parameters are passed by pointer, the compiler can't check for a size
+ * match, and it is prone to stack-corruption type bugs.
+
+uint32_t generic_reg_get(const struct dc_context *ctx,
+		uint32_t addr, int n, ...)
+{
+	uint32_t shift, mask;
+	uint32_t *field_value;
+	uint32_t reg_val;
+	int i = 0;
+
+	reg_val = dm_read_reg(ctx, addr);
+
+	va_list ap;
+	va_start(ap, n);
+
+	while (i < n) {
+		shift = va_arg(ap, uint32_t);
+		mask = va_arg(ap, uint32_t);
+		field_value = va_arg(ap, uint32_t *);
+
+		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
+		i++;
+	}
+
+	va_end(ap);
+
+	return reg_val;
+}
+*/
+
+uint32_t generic_reg_wait(const struct dc_context *ctx,
+	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
+	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+	const char *func_name)
+{
+	uint32_t field_value;
+	uint32_t reg_val;
+	int i;
+
+	for (i = 0; i <= time_out_num_tries; i++) {
+		if (i) {
+			if (delay_between_poll_us > 0 && delay_between_poll_us < 1000)
+				udelay(delay_between_poll_us);
+			else if (delay_between_poll_us >= 1000)
+				msleep(delay_between_poll_us / 1000);
+		}
+
+		reg_val = dm_read_reg(ctx, addr);
+
+		field_value = get_reg_field_value_ex(reg_val, mask, shift);
+
+		if (field_value == condition_value)
+			return reg_val;
+	}
+
+	DC_ERR("REG_WAIT timeout %dus * %d tries - %s",
+			delay_between_poll_us, time_out_num_tries, func_name);
+	return reg_val;
+}
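A minimal usage sketch of the polling helper above; the register offset, shift, and mask are hypothetical, and real callers typically go through a REG_WAIT-style macro that fills these in from generated register headers:

static void example_wait_for_enable(const struct dc_context *ctx)
{
	/* Poll bit 0 of a hypothetical control register until it reads 1,
	 * waiting 10 us between polls, for at most 1000 tries. */
	generic_reg_wait(ctx, 0x1234 /* hypothetical offset */,
			0 /* shift */, 0x1 /* mask */, 1 /* expected value */,
			10, 1000, __func__);
}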

+ 588 - 0
drivers/gpu/drm/amd/display/dc/dc_hw_types.h

@@ -0,0 +1,588 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HW_TYPES_H
+#define DC_HW_TYPES_H
+
+#include "os_types.h"
+
+/******************************************************************************
+ * Data types for Virtual HW Layer of DAL3.
+ * (see DAL3 design documents for HW Layer definition)
+ *
+ * The intended uses are:
+ * 1. Generating pseudocode sequences for HW programming.
+ * 2. Implementation of real HW programming by HW Sequencer of DAL3.
+ *
+ * Note: do *not* add any types which are *not* used for HW programming - this
+ * will ensure separation of Logic layer from HW layer.
+ ******************************************************************************/
+
+union large_integer {
+	struct {
+		uint32_t low_part;
+		int32_t high_part;
+	};
+
+	struct {
+		uint32_t low_part;
+		int32_t high_part;
+	} u;
+
+	int64_t quad_part;
+};
+
+#define PHYSICAL_ADDRESS_LOC union large_integer
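A small sketch of how the union is meant to be used: the same 64-bit value can be written through quad_part or assembled from the 32-bit halves. lower_32_bits/upper_32_bits are standard kernel helpers; the function name is illustrative:

static void example_set_location(PHYSICAL_ADDRESS_LOC *loc, uint64_t gpu_addr)
{
	/* Assign the whole 64-bit value at once... */
	loc->quad_part = (int64_t)gpu_addr;

	/* ...or, equivalently, fill the two halves individually. */
	loc->low_part = lower_32_bits(gpu_addr);
	loc->high_part = (int32_t)upper_32_bits(gpu_addr);
}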
+
+enum dc_plane_addr_type {
+	PLN_ADDR_TYPE_GRAPHICS = 0,
+	PLN_ADDR_TYPE_GRPH_STEREO,
+	PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+};
+
+struct dc_plane_address {
+	enum dc_plane_addr_type type;
+	union {
+		struct{
+			PHYSICAL_ADDRESS_LOC addr;
+			PHYSICAL_ADDRESS_LOC meta_addr;
+			union large_integer dcc_const_color;
+		} grph;
+
+		/*stereo*/
+		struct {
+			PHYSICAL_ADDRESS_LOC left_addr;
+			PHYSICAL_ADDRESS_LOC left_meta_addr;
+			union large_integer left_dcc_const_color;
+
+			PHYSICAL_ADDRESS_LOC right_addr;
+			PHYSICAL_ADDRESS_LOC right_meta_addr;
+			union large_integer right_dcc_const_color;
+
+		} grph_stereo;
+
+		/*video  progressive*/
+		struct {
+			PHYSICAL_ADDRESS_LOC luma_addr;
+			PHYSICAL_ADDRESS_LOC luma_meta_addr;
+			union large_integer luma_dcc_const_color;
+
+			PHYSICAL_ADDRESS_LOC chroma_addr;
+			PHYSICAL_ADDRESS_LOC chroma_meta_addr;
+			union large_integer chroma_dcc_const_color;
+		} video_progressive;
+	};
+};
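The type field selects which union member is valid. A hedged sketch for the simple graphics case (the helper name and the zeroed DCC metadata are illustrative):

static void example_fill_graphics_address(struct dc_plane_address *pa,
		uint64_t fb_gpu_addr)
{
	pa->type = PLN_ADDR_TYPE_GRAPHICS;
	pa->grph.addr.quad_part = (int64_t)fb_gpu_addr;
	/* No DCC surface in this sketch, so no metadata or const color. */
	pa->grph.meta_addr.quad_part = 0;
	pa->grph.dcc_const_color.quad_part = 0;
}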
+
+struct dc_size {
+	uint32_t width;
+	uint32_t height;
+};
+
+struct rect {
+	int x;
+	int y;
+	uint32_t width;
+	uint32_t height;
+};
+
+union plane_size {
+	/* Grph or Video will be selected
+	 * based on format above:
+	 * Use Video structure if
+	 * format >= DalPixelFormat_VideoBegin
+	 * else use Grph structure
+	 */
+	struct {
+		struct rect surface_size;
+		/* Graphic surface pitch in pixels.
+		 * In LINEAR_GENERAL mode, pitch
+		 * is 32 pixel aligned.
+		 */
+		uint32_t surface_pitch;
+
+		uint32_t meta_pitch;
+	} grph;
+
+	struct {
+		struct rect luma_size;
+		/* Graphic surface pitch in pixels.
+		 * In LINEAR_GENERAL mode, pitch is
+		 * 32 pixel aligned.
+		 */
+		uint32_t luma_pitch;
+		uint32_t meta_luma_pitch;
+
+		struct rect chroma_size;
+		/* Graphic surface pitch in pixels.
+		 * In LINEAR_GENERAL mode, pitch is
+		 * 32 pixel aligned.
+		 */
+		uint32_t chroma_pitch;
+		uint32_t meta_chroma_pitch;
+	} video;
+};
+
+struct dc_plane_dcc_param {
+	bool enable;
+
+	union {
+		struct {
+			uint32_t meta_pitch;
+			bool independent_64b_blks;
+		} grph;
+
+		struct {
+			uint32_t meta_pitch_l;
+			bool independent_64b_blks_l;
+
+			uint32_t meta_pitch_c;
+			bool independent_64b_blks_c;
+		} video;
+	};
+};
+
+/*Displayable pixel format in fb*/
+enum surface_pixel_format {
+	SURFACE_PIXEL_FORMAT_GRPH_BEGIN = 0,
+	/* TO BE REMOVED: 256-color palette */
+	SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS =
+		SURFACE_PIXEL_FORMAT_GRPH_BEGIN,
+	/*16 bpp*/
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB1555,
+	/*16 bpp*/
+	SURFACE_PIXEL_FORMAT_GRPH_RGB565,
+	/*32 bpp*/
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
+	/* 32 bpp swapped */
+	SURFACE_PIXEL_FORMAT_GRPH_BGRA8888,
+
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
+	/* swapped */
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010,
+	/* TO BE REMOVED: swapped; XR_BIAS has no difference in pixel layout
+	 * from the previous format, so we can delete this after discussion */
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
+	/*64 bpp */
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+	/*float*/
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
+	/* swapped & float */
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
+	/*grow graphics here if necessary */
+
+	SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+	SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
+		SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+	SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
+	SURFACE_PIXEL_FORMAT_INVALID
+
+	/*grow 444 video here if necessary */
+};
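The union plane_size comment above relies on this ordering: everything from SURFACE_PIXEL_FORMAT_VIDEO_BEGIN up to (but excluding) SURFACE_PIXEL_FORMAT_INVALID is a video format. A one-line predicate capturing that rule (the function itself is illustrative):

static bool example_is_video_format(enum surface_pixel_format format)
{
	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN &&
	       format < SURFACE_PIXEL_FORMAT_INVALID;
}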
+
+/* Pixel format */
+enum pixel_format {
+	/*graph*/
+	PIXEL_FORMAT_UNINITIALIZED,
+	PIXEL_FORMAT_INDEX8,
+	PIXEL_FORMAT_RGB565,
+	PIXEL_FORMAT_ARGB8888,
+	PIXEL_FORMAT_ARGB2101010,
+	PIXEL_FORMAT_ARGB2101010_XRBIAS,
+	PIXEL_FORMAT_FP16,
+	/*video*/
+	PIXEL_FORMAT_420BPP12,
+	/*end of pixel format definition*/
+	PIXEL_FORMAT_INVALID,
+
+	PIXEL_FORMAT_GRPH_BEGIN = PIXEL_FORMAT_INDEX8,
+	PIXEL_FORMAT_GRPH_END = PIXEL_FORMAT_FP16,
+	PIXEL_FORMAT_VIDEO_BEGIN = PIXEL_FORMAT_420BPP12,
+	PIXEL_FORMAT_VIDEO_END = PIXEL_FORMAT_420BPP12,
+	PIXEL_FORMAT_UNKNOWN
+};
+
+enum tile_split_values {
+	DC_DISPLAY_MICRO_TILING = 0x0,
+	DC_THIN_MICRO_TILING = 0x1,
+	DC_DEPTH_MICRO_TILING = 0x2,
+	DC_ROTATED_MICRO_TILING = 0x3,
+};
+
+/* TODO: These values come from hardware spec. We need to readdress this
+ * if they ever change.
+ */
+enum array_mode_values {
+	DC_ARRAY_LINEAR_GENERAL = 0,
+	DC_ARRAY_LINEAR_ALLIGNED,
+	DC_ARRAY_1D_TILED_THIN1,
+	DC_ARRAY_1D_TILED_THICK,
+	DC_ARRAY_2D_TILED_THIN1,
+	DC_ARRAY_PRT_TILED_THIN1,
+	DC_ARRAY_PRT_2D_TILED_THIN1,
+	DC_ARRAY_2D_TILED_THICK,
+	DC_ARRAY_2D_TILED_X_THICK,
+	DC_ARRAY_PRT_TILED_THICK,
+	DC_ARRAY_PRT_2D_TILED_THICK,
+	DC_ARRAY_PRT_3D_TILED_THIN1,
+	DC_ARRAY_3D_TILED_THIN1,
+	DC_ARRAY_3D_TILED_THICK,
+	DC_ARRAY_3D_TILED_X_THICK,
+	DC_ARRAY_PRT_3D_TILED_THICK,
+};
+
+enum tile_mode_values {
+	DC_ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+	DC_ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+};
+
+union dc_tiling_info {
+
+	struct {
+		/* Specifies the number of memory banks for tiling
+		 *	purposes.
+		 * Only applies to 2D and 3D tiling modes.
+		 *	POSSIBLE VALUES: 2,4,8,16
+		 */
+		unsigned int num_banks;
+		/* Specifies the number of tiles in the x direction
+		 *	to be incorporated into the same bank.
+		 * Only applies to 2D and 3D tiling modes.
+		 *	POSSIBLE VALUES: 1,2,4,8
+		 */
+		unsigned int bank_width;
+		unsigned int bank_width_c;
+		/* Specifies the number of tiles in the y direction to
+		 *	be incorporated into the same bank.
+		 * Only applies to 2D and 3D tiling modes.
+		 *	POSSIBLE VALUES: 1,2,4,8
+		 */
+		unsigned int bank_height;
+		unsigned int bank_height_c;
+		/* Specifies the macro tile aspect ratio. Only applies
+		 * to 2D and 3D tiling modes.
+		 */
+		unsigned int tile_aspect;
+		unsigned int tile_aspect_c;
+		/* Specifies the number of bytes that will be stored
+		 *	contiguously for each tile.
+		 * If the tile data requires more storage than this
+		 *	amount, it is split into multiple slices.
+		 * This field must not be larger than
+		 *	GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+		 * Only applies to 2D and 3D tiling modes.
+		 * For color render targets, TILE_SPLIT >= 256B.
+		 */
+		enum tile_split_values tile_split;
+		enum tile_split_values tile_split_c;
+		/* Specifies the addressing within a tile.
+		 *	0x0 - DISPLAY_MICRO_TILING
+		 *	0x1 - THIN_MICRO_TILING
+		 *	0x2 - DEPTH_MICRO_TILING
+		 *	0x3 - ROTATED_MICRO_TILING
+		 */
+		enum tile_mode_values tile_mode;
+		enum tile_mode_values tile_mode_c;
+		/* Specifies the number of pipes and how they are
+		 *	interleaved in the surface.
+		 * Refer to memory addressing document for complete
+		 *	details and constraints.
+		 */
+		unsigned int pipe_config;
+		/* Specifies the tiling mode of the surface.
+		 * THIN tiles use an 8x8x1 tile size.
+		 * THICK tiles use an 8x8x4 tile size.
+		 * 2D tiling modes rotate banks for successive Z slices
+		 * 3D tiling modes rotate pipes and banks for Z slices
+		 * Refer to memory addressing document for complete
+		 *	details and constraints.
+		 */
+		enum array_mode_values array_mode;
+	} gfx8;
+
+};
+
+/* Rotation angle */
+enum dc_rotation_angle {
+	ROTATION_ANGLE_0 = 0,
+	ROTATION_ANGLE_90,
+	ROTATION_ANGLE_180,
+	ROTATION_ANGLE_270,
+	ROTATION_ANGLE_COUNT
+};
+
+enum dc_scan_direction {
+	SCAN_DIRECTION_UNKNOWN = 0,
+	SCAN_DIRECTION_HORIZONTAL = 1,  /* 0, 180 rotation */
+	SCAN_DIRECTION_VERTICAL = 2,    /* 90, 270 rotation */
+};
+
+struct dc_cursor_position {
+	uint32_t x;
+	uint32_t y;
+
+	uint32_t x_hotspot;
+	uint32_t y_hotspot;
+
+	/*
+	 * This parameter indicates whether HW cursor should be enabled
+	 */
+	bool enable;
+
+	/*
+	 * This parameter indicates whether cursor hot spot should be
+	 * programmed
+	 */
+	bool hot_spot_enable;
+};
+
+/* IPP related types */
+
+/* Used by both ipp amd opp functions*/
+/* TODO: to be consolidated with enum color_space */
+
+/*
+ * This enum is for programming CURSOR_MODE register field. What this register
+ * should be programmed to depends on OS requested cursor shape flags and what
+ * we stored in the cursor surface.
+ */
+enum dc_cursor_color_format {
+	CURSOR_MODE_MONO,
+	CURSOR_MODE_COLOR_1BIT_AND,
+	CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
+	CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
+};
+
+/*
+ * This is all the parameters required by DAL in order to update the cursor
+ * attributes, including the new cursor image surface address, size, hotspot
+ * location, color format, etc.
+ */
+
+union dc_cursor_attribute_flags {
+	struct {
+		uint32_t ENABLE_MAGNIFICATION:1;
+		uint32_t INVERSE_TRANSPARENT_CLAMPING:1;
+		uint32_t HORIZONTAL_MIRROR:1;
+		uint32_t VERTICAL_MIRROR:1;
+		uint32_t INVERT_PIXEL_DATA:1;
+		uint32_t ZERO_EXPANSION:1;
+		uint32_t MIN_MAX_INVERT:1;
+		uint32_t RESERVED:25;
+	} bits;
+	uint32_t value;
+};
+
+struct dc_cursor_attributes {
+	PHYSICAL_ADDRESS_LOC address;
+
+	/* Width and height should correspond to cursor surface width x height */
+	uint32_t width;
+	uint32_t height;
+	uint32_t x_hot;
+	uint32_t y_hot;
+
+	enum dc_cursor_color_format color_format;
+
+	/* In case we support HW Cursor rotation in the future */
+	enum dc_rotation_angle rotation_angle;
+
+	union dc_cursor_attribute_flags attribute_flags;
+};
+
+/* OPP */
+
+enum dc_color_space {
+	COLOR_SPACE_UNKNOWN,
+	COLOR_SPACE_SRGB,
+	COLOR_SPACE_SRGB_LIMITED,
+	COLOR_SPACE_YPBPR601,
+	COLOR_SPACE_YPBPR709,
+	COLOR_SPACE_YCBCR601,
+	COLOR_SPACE_YCBCR709,
+	COLOR_SPACE_YCBCR601_LIMITED,
+	COLOR_SPACE_YCBCR709_LIMITED
+};
+
+enum dc_quantization_range {
+	QUANTIZATION_RANGE_UNKNOWN,
+	QUANTIZATION_RANGE_FULL,
+	QUANTIZATION_RANGE_LIMITED
+};
+
+/* XFM */
+
+/* used in  struct dc_surface */
+struct scaling_taps {
+	uint32_t v_taps;
+	uint32_t h_taps;
+	uint32_t v_taps_c;
+	uint32_t h_taps_c;
+};
+
+enum dc_timing_standard {
+	TIMING_STANDARD_UNDEFINED,
+	TIMING_STANDARD_DMT,
+	TIMING_STANDARD_GTF,
+	TIMING_STANDARD_CVT,
+	TIMING_STANDARD_CVT_RB,
+	TIMING_STANDARD_CEA770,
+	TIMING_STANDARD_CEA861,
+	TIMING_STANDARD_HDMI,
+	TIMING_STANDARD_TV_NTSC,
+	TIMING_STANDARD_TV_NTSC_J,
+	TIMING_STANDARD_TV_PAL,
+	TIMING_STANDARD_TV_PAL_M,
+	TIMING_STANDARD_TV_PAL_CN,
+	TIMING_STANDARD_TV_SECAM,
+	TIMING_STANDARD_EXPLICIT,
+	/*!< For explicit timings from EDID, VBIOS, etc.*/
+	TIMING_STANDARD_USER_OVERRIDE,
+	/*!< For mode timing override by user*/
+	TIMING_STANDARD_MAX
+};
+
+enum dc_timing_3d_format {
+	TIMING_3D_FORMAT_NONE,
+	TIMING_3D_FORMAT_FRAME_ALTERNATE, /* No stereosync at all*/
+	TIMING_3D_FORMAT_INBAND_FA, /* Inband Frame Alternate (DVI/DP)*/
+	TIMING_3D_FORMAT_DP_HDMI_INBAND_FA, /* Inband FA to HDMI Frame Pack*/
+	/* for active DP-HDMI dongle*/
+	TIMING_3D_FORMAT_SIDEBAND_FA, /* Sideband Frame Alternate (eDP)*/
+	TIMING_3D_FORMAT_HW_FRAME_PACKING,
+	TIMING_3D_FORMAT_SW_FRAME_PACKING,
+	TIMING_3D_FORMAT_ROW_INTERLEAVE,
+	TIMING_3D_FORMAT_COLUMN_INTERLEAVE,
+	TIMING_3D_FORMAT_PIXEL_INTERLEAVE,
+	TIMING_3D_FORMAT_SIDE_BY_SIDE,
+	TIMING_3D_FORMAT_TOP_AND_BOTTOM,
+	TIMING_3D_FORMAT_SBS_SW_PACKED,
+	/* Side-by-side, packed by application/driver into 2D frame*/
+	TIMING_3D_FORMAT_TB_SW_PACKED,
+	/* Top-and-bottom, packed by application/driver into 2D frame*/
+
+	TIMING_3D_FORMAT_MAX,
+};
+
+enum dc_color_depth {
+	COLOR_DEPTH_UNDEFINED,
+	COLOR_DEPTH_666,
+	COLOR_DEPTH_888,
+	COLOR_DEPTH_101010,
+	COLOR_DEPTH_121212,
+	COLOR_DEPTH_141414,
+	COLOR_DEPTH_161616,
+	COLOR_DEPTH_COUNT
+};
+
+enum dc_pixel_encoding {
+	PIXEL_ENCODING_UNDEFINED,
+	PIXEL_ENCODING_RGB,
+	PIXEL_ENCODING_YCBCR422,
+	PIXEL_ENCODING_YCBCR444,
+	PIXEL_ENCODING_YCBCR420,
+	PIXEL_ENCODING_COUNT
+};
+
+enum dc_aspect_ratio {
+	ASPECT_RATIO_NO_DATA,
+	ASPECT_RATIO_4_3,
+	ASPECT_RATIO_16_9,
+	ASPECT_RATIO_64_27,
+	ASPECT_RATIO_256_135,
+	ASPECT_RATIO_FUTURE
+};
+
+enum scanning_type {
+	SCANNING_TYPE_NODATA = 0,
+	SCANNING_TYPE_OVERSCAN,
+	SCANNING_TYPE_UNDERSCAN,
+	SCANNING_TYPE_FUTURE,
+	SCANNING_TYPE_UNDEFINED
+};
+
+struct dc_crtc_timing_flags {
+	uint32_t INTERLACE :1;
+	uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, polarity is
+	 positive (reversed relative to the DAL1/video BIOS definition) */
+	uint32_t VSYNC_POSITIVE_POLARITY :1; /* when set to 1, polarity is
+	 positive (reversed relative to the DAL1/video BIOS definition) */
+
+	uint32_t HORZ_COUNT_BY_TWO:1;
+
+	uint32_t EXCLUSIVE_3D :1; /* if this bit set,
+	 timing can be driven in 3D format only
+	 and there is no corresponding 2D timing*/
+	uint32_t RIGHT_EYE_3D_POLARITY :1; /* 1 - means right eye polarity
+	 (right eye = '1', left eye = '0') */
+	uint32_t SUB_SAMPLE_3D :1; /* 1 - means left/right  images subsampled
+	 when mixed into 3D image. 0 - means summation (3D timing is doubled)*/
+	uint32_t USE_IN_3D_VIEW_ONLY :1; /* Do not use this timing in 2D View,
+	 because corresponding 2D timing also present in the list*/
+	uint32_t STEREO_3D_PREFERENCE :1; /* Means this is 2D timing
+	 and we want to match priority of corresponding 3D timing*/
+	uint32_t Y_ONLY :1;
+
+	uint32_t YCBCR420 :1; /* TODO: shouldn't need this flag, should be a separate pixel format */
+	uint32_t DTD_COUNTER :5; /* values 1 to 16 */
+
+	/* HDMI 2.0 - Support scrambling for TMDS character
+	 * rates less than or equal to 340Mcsc */
+	uint32_t LTE_340MCSC_SCRAMBLE:1;
+
+};
+
+struct dc_crtc_timing {
+
+	uint32_t h_total;
+	uint32_t h_border_left;
+	uint32_t h_addressable;
+	uint32_t h_border_right;
+	uint32_t h_front_porch;
+	uint32_t h_sync_width;
+
+	uint32_t v_total;
+	uint32_t v_border_top;
+	uint32_t v_addressable;
+	uint32_t v_border_bottom;
+	uint32_t v_front_porch;
+	uint32_t v_sync_width;
+
+	uint32_t pix_clk_khz;
+
+	uint32_t vic;
+	uint32_t hdmi_vic;
+	enum dc_timing_3d_format timing_3d_format;
+	enum dc_color_depth display_color_depth;
+	enum dc_pixel_encoding pixel_encoding;
+	enum dc_aspect_ratio aspect_ratio;
+	enum scanning_type scan_type;
+
+	struct dc_crtc_timing_flags flags;
+};
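As a worked example of how these fields relate: the nominal field rate falls out of the timing as pix_clk_khz * 1000 / (h_total * v_total), e.g. 148500 kHz / (2200 * 1125) = 60.000 Hz for CEA 1080p60. A hedged helper returning milli-Hz (div_u64 is the standard kernel 64-bit divide; the function name is illustrative):

#include <linux/math64.h>

static unsigned int example_field_rate_mhz(const struct dc_crtc_timing *timing)
{
	/* kHz -> Hz -> mHz before dividing, to keep integer precision. */
	uint64_t num = (uint64_t)timing->pix_clk_khz * 1000 * 1000;
	uint32_t den = timing->h_total * timing->v_total;

	return (unsigned int)div_u64(num, den);
}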
+
+#endif /* DC_HW_TYPES_H */
+

+ 493 - 0
drivers/gpu/drm/amd/display/dc/dc_types.h

@@ -0,0 +1,493 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_TYPES_H_
+#define DC_TYPES_H_
+
+#include "fixed32_32.h"
+#include "fixed31_32.h"
+#include "irq_types.h"
+#include "dc_dp_types.h"
+#include "dc_hw_types.h"
+#include "dal_types.h"
+
+/* forward declarations */
+struct dc_surface;
+struct dc_target;
+struct dc_stream;
+struct dc_link;
+struct dc_sink;
+struct dal;
+
+/********************************
+ * Environment definitions
+ ********************************/
+enum dce_environment {
+	DCE_ENV_PRODUCTION_DRV = 0,
+	/* Emulation on FPGA, in "Maximus" System.
+	 * This environment enforces that *only* DC registers accessed.
+	 * (access to non-DC registers will hang FPGA) */
+	DCE_ENV_FPGA_MAXIMUS,
+	/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
+	 * requirements of Diagnostics team. */
+	DCE_ENV_DIAG
+};
+
+/* Note: use these macro definitions instead of direct comparison! */
+#define IS_FPGA_MAXIMUS_DC(dce_environment) \
+	(dce_environment == DCE_ENV_FPGA_MAXIMUS)
+
+#define IS_DIAG_DC(dce_environment) \
+	(IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
+
+struct hw_asic_id {
+	uint32_t chip_id;
+	uint32_t chip_family;
+	uint32_t pci_revision_id;
+	uint32_t hw_internal_rev;
+	uint32_t vram_type;
+	uint32_t vram_width;
+	uint32_t feature_flags;
+	uint32_t fake_paths_num;
+	void *atombios_base_address;
+};
+
+struct dc_context {
+	struct dc *dc;
+
+	void *driver_context; /* e.g. amdgpu_device */
+
+	struct dal_logger *logger;
+	void *cgs_device;
+
+	enum dce_environment dce_environment;
+	struct hw_asic_id asic_id;
+
+	/* todo: below should probably move to dc.  to facilitate removal
+	 * of AS we will store these here
+	 */
+	enum dce_version dce_version;
+	struct dc_bios *dc_bios;
+	bool created_bios;
+	struct gpio_service *gpio_service;
+	struct i2caux *i2caux;
+};
+
+
+#define MAX_EDID_BUFFER_SIZE 512
+#define EDID_BLOCK_SIZE 128
+#define MAX_SURFACE_NUM 2
+#define NUM_PIXEL_FORMATS 10
+
+#include "dc_ddc_types.h"
+
+enum tiling_mode {
+	TILING_MODE_INVALID,
+	TILING_MODE_LINEAR,
+	TILING_MODE_TILED,
+	TILING_MODE_COUNT
+};
+
+enum view_3d_format {
+	VIEW_3D_FORMAT_NONE = 0,
+	VIEW_3D_FORMAT_FRAME_SEQUENTIAL,
+	VIEW_3D_FORMAT_SIDE_BY_SIDE,
+	VIEW_3D_FORMAT_TOP_AND_BOTTOM,
+	VIEW_3D_FORMAT_COUNT,
+	VIEW_3D_FORMAT_FIRST = VIEW_3D_FORMAT_FRAME_SEQUENTIAL
+};
+
+enum plane_stereo_format {
+	PLANE_STEREO_FORMAT_NONE = 0,
+	PLANE_STEREO_FORMAT_SIDE_BY_SIDE = 1,
+	PLANE_STEREO_FORMAT_TOP_AND_BOTTOM = 2,
+	PLANE_STEREO_FORMAT_FRAME_ALTERNATE = 3,
+	PLANE_STEREO_FORMAT_ROW_INTERLEAVED = 5,
+	PLANE_STEREO_FORMAT_COLUMN_INTERLEAVED = 6,
+	PLANE_STEREO_FORMAT_CHECKER_BOARD = 7
+};
+
+/* TODO: find a way to calculate the number of bits.
+ * Please increase NUM_PIXEL_FORMATS if the pixel_format enum grows
+ * (num counts from PIXEL_FORMAT_INDEX8 to PIXEL_FORMAT_444BPP32).
+ */
+
+enum dc_edid_connector_type {
+	EDID_CONNECTOR_UNKNOWN = 0,
+	EDID_CONNECTOR_ANALOG = 1,
+	EDID_CONNECTOR_DIGITAL = 10,
+	EDID_CONNECTOR_DVI = 11,
+	EDID_CONNECTOR_HDMIA = 12,
+	EDID_CONNECTOR_MDDI = 14,
+	EDID_CONNECTOR_DISPLAYPORT = 15
+};
+
+enum dc_edid_status {
+	EDID_OK,
+	EDID_BAD_INPUT,
+	EDID_NO_RESPONSE,
+	EDID_BAD_CHECKSUM,
+};
+
+/* audio capability from EDID*/
+struct dc_cea_audio_mode {
+	uint8_t format_code; /* ucData[0] [6:3]*/
+	uint8_t channel_count; /* ucData[0] [2:0]*/
+	uint8_t sample_rate; /* ucData[1]*/
+	union {
+		uint8_t sample_size; /* for LPCM*/
+		/*  for Audio Formats 2-8 (Max bit rate divided by 8 kHz)*/
+		uint8_t max_bit_rate;
+		uint8_t audio_codec_vendor_specific; /* for Audio Formats 9-15*/
+	};
+};
+
+struct dc_edid {
+	uint32_t length;
+	uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+};
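The EDID_BAD_CHECKSUM status above corresponds to the standard EDID rule that each 128-byte block must sum to zero modulo 256. A minimal verification sketch (function name illustrative; only the base block is checked):

static enum dc_edid_status example_verify_base_block(const struct dc_edid *edid)
{
	uint8_t sum = 0;
	uint32_t i;

	if (edid->length < EDID_BLOCK_SIZE)
		return EDID_BAD_INPUT;

	for (i = 0; i < EDID_BLOCK_SIZE; i++)
		sum += edid->raw_edid[i];

	return (sum == 0) ? EDID_OK : EDID_BAD_CHECKSUM;
}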
+
+/* When the speaker location data block is not available,
+ * DEFAULT_SPEAKER_LOCATION is used. In this case we assume the speaker
+ * locations are front left, front right and front center. */
+#define DEFAULT_SPEAKER_LOCATION 5
+
+#define DC_MAX_AUDIO_DESC_COUNT 16
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+
+struct dc_edid_caps {
+	/* sink identification */
+	uint16_t manufacturer_id;
+	uint16_t product_id;
+	uint32_t serial_number;
+	uint8_t manufacture_week;
+	uint8_t manufacture_year;
+	uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+
+	/* audio caps */
+	uint8_t speaker_flags;
+	uint32_t audio_mode_count;
+	struct dc_cea_audio_mode audio_modes[DC_MAX_AUDIO_DESC_COUNT];
+	uint32_t audio_latency;
+	uint32_t video_latency;
+
+	/*HDMI 2.0 caps*/
+	bool lte_340mcsc_scramble;
+
+	bool edid_hdmi;
+};
+
+struct view {
+	uint32_t width;
+	uint32_t height;
+};
+
+struct dc_mode_flags {
+	/* note: part of refresh rate flag*/
+	uint32_t INTERLACE :1;
+	/* native display timing*/
+	uint32_t NATIVE :1;
+	/* preferred is the recommended mode, one per display */
+	uint32_t PREFERRED :1;
+	/* true if this mode should use reduced blanking timings
+	 *_not_ related to the Reduced Blanking adjustment*/
+	uint32_t REDUCED_BLANKING :1;
+	/* note: part of refreshrate flag*/
+	uint32_t VIDEO_OPTIMIZED_RATE :1;
+	/* should be reported to upper layers as mode_flags*/
+	uint32_t PACKED_PIXEL_FORMAT :1;
+	/* preferred view */
+	uint32_t PREFERRED_VIEW :1;
+	/* this timing should be used only in tiled mode*/
+	uint32_t TILED_MODE :1;
+	uint32_t DSE_MODE :1;
+	/* Refresh rate divider when a Miracast sink is using a
+	 * different rate than the output display device.
+	 * Must be zero for wired displays and non-zero for
+	 * Miracast displays. */
+	uint32_t MIRACAST_REFRESH_DIVIDER;
+};
+
+
+enum dc_timing_source {
+	TIMING_SOURCE_UNDEFINED,
+
+	/* explicitly specified by user, most important */
+	TIMING_SOURCE_USER_FORCED,
+	TIMING_SOURCE_USER_OVERRIDE,
+	TIMING_SOURCE_CUSTOM,
+	TIMING_SOURCE_EXPLICIT,
+
+	/* explicitly specified by the display device, more important*/
+	TIMING_SOURCE_EDID_CEA_SVD_3D,
+	TIMING_SOURCE_EDID_CEA_SVD_PREFERRED,
+	TIMING_SOURCE_EDID_CEA_SVD_420,
+	TIMING_SOURCE_EDID_DETAILED,
+	TIMING_SOURCE_EDID_ESTABLISHED,
+	TIMING_SOURCE_EDID_STANDARD,
+	TIMING_SOURCE_EDID_CEA_SVD,
+	TIMING_SOURCE_EDID_CVT_3BYTE,
+	TIMING_SOURCE_EDID_4BYTE,
+	TIMING_SOURCE_VBIOS,
+	TIMING_SOURCE_CV,
+	TIMING_SOURCE_TV,
+	TIMING_SOURCE_HDMI_VIC,
+
+	/* implicitly specified by display device, still safe but less important*/
+	TIMING_SOURCE_DEFAULT,
+
+	/* only used for custom base modes */
+	TIMING_SOURCE_CUSTOM_BASE,
+
+	/* these timings might not work, least important */
+	TIMING_SOURCE_RANGELIMIT,
+	TIMING_SOURCE_OS_FORCED,
+	TIMING_SOURCE_IMPLICIT,
+
+	/* only used by default mode list*/
+	TIMING_SOURCE_BASICMODE,
+
+	TIMING_SOURCE_COUNT
+};
+
+enum dc_timing_support_method {
+	TIMING_SUPPORT_METHOD_UNDEFINED,
+	TIMING_SUPPORT_METHOD_EXPLICIT,
+	TIMING_SUPPORT_METHOD_IMPLICIT,
+	TIMING_SUPPORT_METHOD_NATIVE
+};
+
+struct dc_mode_info {
+	uint32_t pixel_width;
+	uint32_t pixel_height;
+	/* Vertical refresh rate for progressive modes;
+	 * field rate for interlaced modes. */
+	uint32_t field_rate;
+
+	enum dc_timing_standard timing_standard;
+	enum dc_timing_source timing_source;
+	struct dc_mode_flags flags;
+};
+
+enum dc_power_state {
+	DC_POWER_STATE_ON = 1,
+	DC_POWER_STATE_STANDBY,
+	DC_POWER_STATE_SUSPEND,
+	DC_POWER_STATE_OFF
+};
+
+/* DC PowerStates */
+enum dc_video_power_state {
+	DC_VIDEO_POWER_UNSPECIFIED = 0,
+	DC_VIDEO_POWER_ON = 1,
+	DC_VIDEO_POWER_STANDBY,
+	DC_VIDEO_POWER_SUSPEND,
+	DC_VIDEO_POWER_OFF,
+	DC_VIDEO_POWER_HIBERNATE,
+	DC_VIDEO_POWER_SHUTDOWN,
+	DC_VIDEO_POWER_ULPS,	/* BACO or Ultra-Light-Power-State */
+	DC_VIDEO_POWER_AFTER_RESET,
+	DC_VIDEO_POWER_MAXIMUM
+};
+
+enum dc_acpi_cm_power_state {
+	DC_ACPI_CM_POWER_STATE_D0 = 1,
+	DC_ACPI_CM_POWER_STATE_D1 = 2,
+	DC_ACPI_CM_POWER_STATE_D2 = 4,
+	DC_ACPI_CM_POWER_STATE_D3 = 8
+};
+
+enum dc_connection_type {
+	dc_connection_none,
+	dc_connection_single,
+	dc_connection_mst_branch,
+	dc_connection_active_dongle
+};
+
+struct dc_csc_adjustments {
+	struct fixed31_32 contrast;
+	struct fixed31_32 saturation;
+	struct fixed31_32 brightness;
+	struct fixed31_32 hue;
+};
+
+enum {
+	MAX_LANES = 2,
+	MAX_COFUNC_PATH = 6,
+	LAYER_INDEX_PRIMARY = -1,
+};
+
+/* Scaling format */
+enum scaling_transformation {
+	SCALING_TRANSFORMATION_UNINITIALIZED,
+	SCALING_TRANSFORMATION_IDENTITY = 0x0001,
+	SCALING_TRANSFORMATION_CENTER_TIMING = 0x0002,
+	SCALING_TRANSFORMATION_FULL_SCREEN_SCALE = 0x0004,
+	SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE = 0x0008,
+	SCALING_TRANSFORMATION_DAL_DECIDE = 0x0010,
+	SCALING_TRANSFORMATION_INVALID = 0x80000000,
+
+	/* Flag the first and last */
+	SCALING_TRANSFORMATION_BEGING = SCALING_TRANSFORMATION_IDENTITY,
+	SCALING_TRANSFORMATION_END =
+		SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE
+};
+
+/* audio*/
+
+union audio_sample_rates {
+	struct sample_rates {
+		uint8_t RATE_32:1;
+		uint8_t RATE_44_1:1;
+		uint8_t RATE_48:1;
+		uint8_t RATE_88_2:1;
+		uint8_t RATE_96:1;
+		uint8_t RATE_176_4:1;
+		uint8_t RATE_192:1;
+	} rate;
+
+	uint8_t all;
+};
+
+struct audio_speaker_flags {
+	uint32_t FL_FR:1;
+	uint32_t LFE:1;
+	uint32_t FC:1;
+	uint32_t RL_RR:1;
+	uint32_t RC:1;
+	uint32_t FLC_FRC:1;
+	uint32_t RLC_RRC:1;
+	uint32_t SUPPORT_AI:1;
+};
+
+struct audio_speaker_info {
+	uint32_t ALLSPEAKERS:7;
+	uint32_t SUPPORT_AI:1;
+};
+
+
+struct audio_info_flags {
+
+	union {
+
+		struct audio_speaker_flags speaker_flags;
+		struct audio_speaker_info   info;
+
+		uint8_t all;
+	};
+};
+
+enum audio_format_code {
+	AUDIO_FORMAT_CODE_FIRST = 1,
+	AUDIO_FORMAT_CODE_LINEARPCM = AUDIO_FORMAT_CODE_FIRST,
+
+	AUDIO_FORMAT_CODE_AC3,
+	/*Layers 1 & 2 */
+	AUDIO_FORMAT_CODE_MPEG1,
+	/*MPEG1 Layer 3 */
+	AUDIO_FORMAT_CODE_MP3,
+	/*multichannel */
+	AUDIO_FORMAT_CODE_MPEG2,
+	AUDIO_FORMAT_CODE_AAC,
+	AUDIO_FORMAT_CODE_DTS,
+	AUDIO_FORMAT_CODE_ATRAC,
+	AUDIO_FORMAT_CODE_1BITAUDIO,
+	AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS,
+	AUDIO_FORMAT_CODE_DTS_HD,
+	AUDIO_FORMAT_CODE_MAT_MLP,
+	AUDIO_FORMAT_CODE_DST,
+	AUDIO_FORMAT_CODE_WMAPRO,
+	AUDIO_FORMAT_CODE_LAST,
+	AUDIO_FORMAT_CODE_COUNT =
+		AUDIO_FORMAT_CODE_LAST - AUDIO_FORMAT_CODE_FIRST
+};
+
+struct audio_mode {
+	 /* ucData[0] [6:3] */
+	enum audio_format_code format_code;
+	/* ucData[0] [2:0] */
+	uint8_t channel_count;
+	/* ucData[1] */
+	union audio_sample_rates sample_rates;
+	union {
+		/* for LPCM */
+		uint8_t sample_size;
+		/* for Audio Formats 2-8 (Max bit rate divided by 8 kHz) */
+		uint8_t max_bit_rate;
+		/* for Audio Formats 9-15 */
+		uint8_t vendor_specific;
+	};
+};
+
+struct audio_info {
+	struct audio_info_flags flags;
+	uint32_t video_latency;
+	uint32_t audio_latency;
+	uint32_t display_index;
+	uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+	uint32_t manufacture_id;
+	uint32_t product_id;
+	/* PortID used for ContainerID when defined */
+	uint32_t port_id[2];
+	uint32_t mode_count;
+	/* this field must be last in this struct */
+	struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
+};
+
+struct freesync_context {
+	bool supported;
+	bool enabled;
+	bool active;
+
+	unsigned int min_refresh_in_micro_hz;
+	unsigned int nominal_refresh_in_micro_hz;
+};
+
+struct colorspace_transform {
+	struct fixed31_32 matrix[12];
+	bool enable_remap;
+};
+
+struct csc_transform {
+	uint16_t matrix[12];
+	bool enable_adjustment;
+};
+
+struct psr_caps {
+	/* These parameters are from PSR capabilities reported by Sink DPCD */
+	unsigned char psr_version;
+	unsigned int psr_rfb_setup_time;
+	bool psr_exit_link_training_required;
+
+	/* These parameters are calculated in Driver,
+	 * based on display timing and Sink capabilities.
+	 * If VBLANK region is too small and Sink takes a long time
+	 * to set up RFB, it may take an extra frame to enter PSR state.
+	 */
+	bool psr_frame_capture_indication_req;
+	unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
+#endif /* DC_TYPES_H_ */

+ 14 - 0
drivers/gpu/drm/amd/display/dc/dce/Makefile

@@ -0,0 +1,14 @@
+#
+# Makefile for common 'dce' logic
+# HW object files under this folder follow a similar pattern for HW programming:
+#   - register offset and/or shift + mask stored in the dce_hw struct
+#   - register programming through common macros that look up register
+#     offset/shift/mask stored in the dce_hw struct
+
+DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o
+
+
+AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE)

+ 920 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c

@@ -0,0 +1,920 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dce_audio.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define DCE_AUD(audio)\
+	container_of(audio, struct dce_audio, base)
+
+#define CTX \
+	aud->base.ctx
+#define REG(reg)\
+	(aud->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	aud->shifts->field_name, aud->masks->field_name
+
+#define IX_REG(reg)\
+	ix ## reg
+
+#define AZ_REG_READ(reg_name) \
+		read_indirect_azalia_reg(audio, IX_REG(reg_name))
+
+#define AZ_REG_WRITE(reg_name, value) \
+		write_indirect_azalia_reg(audio, IX_REG(reg_name), value)
+
+static void write_indirect_azalia_reg(struct audio *audio,
+	uint32_t reg_index,
+	uint32_t reg_data)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	/* AZALIA_F0_CODEC_ENDPOINT_INDEX  endpoint index  */
+	REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+			AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+	/* AZALIA_F0_CODEC_ENDPOINT_DATA  endpoint data  */
+	REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
+			AZALIA_ENDPOINT_REG_DATA, reg_data);
+
+	dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+		"AUDIO:write_indirect_azalia_reg: index: %u  data: %u\n",
+		reg_index, reg_data);
+}
+
+static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	uint32_t value = 0;
+
+	/* AZALIA_F0_CODEC_ENDPOINT_INDEX  endpoint index  */
+	REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+			AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+	/* AZALIA_F0_CODEC_ENDPOINT_DATA  endpoint data  */
+	value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
+
+	dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+		"AUDIO:read_indirect_azalia_reg: index: %u  data: %u\n",
+		reg_index, value);
+
+	return value;
+}
+
+static bool is_audio_format_supported(
+	const struct audio_info *audio_info,
+	enum audio_format_code audio_format_code,
+	uint32_t *format_index)
+{
+	uint32_t index;
+	uint32_t max_channel_index = 0;
+	bool found = false;
+
+	if (audio_info == NULL)
+		return found;
+
+	/* pass through whole array */
+	for (index = 0; index < audio_info->mode_count; index++) {
+		if (audio_info->modes[index].format_code == audio_format_code) {
+			if (found) {
+				/* format has multiple entries; choose the one
+				 * with the highest number of channels */
+				if (audio_info->modes[index].channel_count >
+				    audio_info->modes[max_channel_index].channel_count) {
+					max_channel_index = index;
+				}
+			} else {
+				/* format found, save its index */
+				found = true;
+				max_channel_index = index;
+			}
+		}
+	}
+
+	/* return index */
+	if (found && format_index != NULL)
+		*format_index = max_channel_index;
+
+	return found;
+}
+
+/*For HDMI, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_hdmi(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	union audio_sample_rates *sample_rates)
+{
+	uint32_t samples;
+	uint32_t  h_blank;
+	bool limit_freq_to_48_khz = false;
+	bool limit_freq_to_88_2_khz = false;
+	bool limit_freq_to_96_khz = false;
+	bool limit_freq_to_174_4_khz = false;
+
+	/* For two-channel support, return whatever the sink supports, unmodified */
+	if (channel_count > 2) {
+
+		/* Based on HDMI spec 1.3 Table 7.5 */
+		if ((crtc_info->requested_pixel_clock <= 27000) &&
+		(crtc_info->v_active <= 576) &&
+		!(crtc_info->interlaced) &&
+		!(crtc_info->pixel_repetition == 2 ||
+		crtc_info->pixel_repetition == 4)) {
+			limit_freq_to_48_khz = true;
+
+		} else if ((crtc_info->requested_pixel_clock <= 27000) &&
+				(crtc_info->v_active <= 576) &&
+				(crtc_info->interlaced) &&
+				(crtc_info->pixel_repetition == 2)) {
+			limit_freq_to_88_2_khz = true;
+
+		} else if ((crtc_info->requested_pixel_clock <= 54000) &&
+				(crtc_info->v_active <= 576) &&
+				!(crtc_info->interlaced)) {
+			limit_freq_to_174_4_khz = true;
+		}
+	}
+
+	/* Also do some calculation for the available Audio Bandwidth for the
+	 * 8 ch (i.e. for the Layout 1 => ch > 2)
+	 */
+	h_blank = crtc_info->h_total - crtc_info->h_active;
+
+	if (crtc_info->pixel_repetition)
+		h_blank *= crtc_info->pixel_repetition;
+
+	/*based on HDMI spec 1.3 Table 7.5 */
+	h_blank -= 58;
+	/*for Control Period */
+	h_blank -= 16;
+
+	samples = h_blank * 10;
+	/* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number
+	 * of Audio samples per line multiplied by 10 - Layout 1)
+	 */
+	 samples /= 32;
+	 samples *= crtc_info->v_active;
+	 /*Number of samples multiplied by 10, per second */
+	 samples *= crtc_info->refresh_rate;
+	 /*Number of Audio samples per second */
+	 samples /= 10;
+
+	 /* @todo do it after deep color is implemented
+	  * 8xx - deep color bandwidth scaling
+	  * Extra bandwidth is available in deep color because the link runs faster than
+	  * pixel rate. This has the effect of allowing more tmds characters to
+	  * be transmitted during blank
+	  */
+
+	switch (crtc_info->color_depth) {
+	case COLOR_DEPTH_888:
+		samples *= 4;
+		break;
+	case COLOR_DEPTH_101010:
+		samples *= 5;
+		break;
+	case COLOR_DEPTH_121212:
+		samples *= 6;
+		break;
+	default:
+		samples *= 4;
+		break;
+	}
+
+	samples /= 4;
+
+	/*check limitation*/
+	if (samples < 88200)
+		limit_freq_to_48_khz = true;
+	else if (samples < 96000)
+		limit_freq_to_88_2_khz = true;
+	else if (samples < 176400)
+		limit_freq_to_96_khz = true;
+	else if (samples < 192000)
+		limit_freq_to_174_4_khz = true;
+
+	if (sample_rates != NULL) {
+		/* limit frequencies */
+		if (limit_freq_to_174_4_khz)
+			sample_rates->rate.RATE_192 = 0;
+
+		if (limit_freq_to_96_khz) {
+			sample_rates->rate.RATE_192 = 0;
+			sample_rates->rate.RATE_176_4 = 0;
+		}
+		if (limit_freq_to_88_2_khz) {
+			sample_rates->rate.RATE_192 = 0;
+			sample_rates->rate.RATE_176_4 = 0;
+			sample_rates->rate.RATE_96 = 0;
+		}
+		if (limit_freq_to_48_khz) {
+			sample_rates->rate.RATE_192 = 0;
+			sample_rates->rate.RATE_176_4 = 0;
+			sample_rates->rate.RATE_96 = 0;
+			sample_rates->rate.RATE_88_2 = 0;
+		}
+	}
+}
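As a sanity check of the arithmetic above, take CEA 1080p60 at 8 bpc with no pixel repetition: h_blank = 2200 - 1920 = 280; subtracting 58 and 16 gives 206. Then samples = 206 * 10 / 32 = 64 (integer math), * 1080 * 60 / 10 = 414720 audio samples per second, and the COLOR_DEPTH_888 branch multiplies and divides by 4, leaving 414720. Since 414720 >= 192000, none of the limit flags are set and the sink's full rate mask survives unmodified.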
+
+/*For DP SST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpsst(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	union audio_sample_rates *sample_rates)
+{
+	/* do nothing */
+}
+
+/*For DP MST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpmst(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	union audio_sample_rates *sample_rates)
+{
+	/* do nothing  */
+}
+
+static void check_audio_bandwidth(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	enum signal_type signal,
+	union audio_sample_rates *sample_rates)
+{
+	switch (signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		check_audio_bandwidth_hdmi(
+			crtc_info, channel_count, sample_rates);
+		break;
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		check_audio_bandwidth_dpsst(
+			crtc_info, channel_count, sample_rates);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		check_audio_bandwidth_dpmst(
+			crtc_info, channel_count, sample_rates);
+		break;
+	default:
+		break;
+	}
+}
+
+/* expose/not expose HBR capability to Audio driver */
+static void set_high_bit_rate_capable(
+	struct audio *audio,
+	bool capable)
+{
+	uint32_t value = 0;
+
+	/* set high bit rate audio capable*/
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR);
+
+	set_reg_field_value(value, capable,
+		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
+		HBR_CAPABLE);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
+}
+
+/* set video latency, in (ms / 2 + 1) units */
+static void set_video_latency(
+	struct audio *audio,
+	int latency_in_ms)
+{
+	uint32_t value = 0;
+
+	if ((latency_in_ms < 0) || (latency_in_ms > 255))
+		return;
+
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+	set_reg_field_value(value, latency_in_ms,
+		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		VIDEO_LIPSYNC);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		value);
+}
+
+/* set audio latency, in (ms / 2 + 1) units */
+static void set_audio_latency(
+	struct audio *audio,
+	int latency_in_ms)
+{
+	uint32_t value = 0;
+
+	if (latency_in_ms < 0)
+		latency_in_ms = 0;
+
+	if (latency_in_ms > 255)
+		latency_in_ms = 255;
+
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+	set_reg_field_value(value, latency_in_ms,
+		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		AUDIO_LIPSYNC);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		value);
+}
+
+void dce_aud_az_enable(struct audio *audio)
+{
+	uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+	if (get_reg_field_value(value,
+			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			AUDIO_ENABLED) != 1)
+		set_reg_field_value(value, 1,
+			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			AUDIO_ENABLED);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+}
+
+void dce_aud_az_disable(struct audio *audio)
+{
+	uint32_t value;
+
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+	set_reg_field_value(value, 0,
+		AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+		AUDIO_ENABLED);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+}
+
+void dce_aud_az_configure(
+	struct audio *audio,
+	enum signal_type signal,
+	const struct audio_crtc_info *crtc_info,
+	const struct audio_info *audio_info)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	uint32_t speakers = audio_info->flags.info.ALLSPEAKERS;
+	uint32_t value;
+	uint32_t field = 0;
+	enum audio_format_code audio_format_code;
+	uint32_t format_index;
+	uint32_t index;
+	bool is_ac3_supported = false;
+	union audio_sample_rates sample_rate;
+	uint32_t name_len = 0;
+
+	/* Speaker Allocation */
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+
+	set_reg_field_value(value,
+		speakers,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		SPEAKER_ALLOCATION);
+
+	/* LFE_PLAYBACK_LEVEL = LFEPBL
+	 * LFEPBL = 0 : Unknown or refer to other information
+	 * LFEPBL = 1 : 0dB playback
+	 * LFEPBL = 2 : +10dB playback
+	 * LFE_BL = 3 : Reserved
+	 */
+	set_reg_field_value(value,
+		0,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		LFE_PLAYBACK_LEVEL);
+	/* todo: according to reg spec LFE_PLAYBACK_LEVEL is read only.
+	 *  why are we writing to it?  DCE8 does not write this */
+
+
+	set_reg_field_value(value,
+		0,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		HDMI_CONNECTION);
+
+	set_reg_field_value(value,
+		0,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		DP_CONNECTION);
+
+	field = get_reg_field_value(value,
+			AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			EXTRA_CONNECTION_INFO);
+
+	field &= ~0x1;
+
+	set_reg_field_value(value,
+		field,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		EXTRA_CONNECTION_INFO);
+
+	/* set audio for output signal */
+	switch (signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		set_reg_field_value(value,
+			1,
+			AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			HDMI_CONNECTION);
+
+		break;
+
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		set_reg_field_value(value,
+			1,
+			AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			DP_CONNECTION);
+		break;
+	default:
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);
+
+	/*  Audio Descriptors   */
+	/* pass through all formats */
+	for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;
+			format_index++) {
+		audio_format_code =
+			(AUDIO_FORMAT_CODE_FIRST + format_index);
+
+		/* those are unsupported, skip programming */
+		if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO ||
+			audio_format_code == AUDIO_FORMAT_CODE_DST)
+			continue;
+
+		value = 0;
+
+		/* check if supported */
+		if (is_audio_format_supported(
+				audio_info, audio_format_code, &index)) {
+			const struct audio_mode *audio_mode =
+					&audio_info->modes[index];
+			union audio_sample_rates sample_rates =
+					audio_mode->sample_rates;
+			uint8_t byte2 = audio_mode->max_bit_rate;
+
+			/* adjust specific properties */
+			switch (audio_format_code) {
+			case AUDIO_FORMAT_CODE_LINEARPCM: {
+				check_audio_bandwidth(
+					crtc_info,
+					audio_mode->channel_count,
+					signal,
+					&sample_rates);
+
+				byte2 = audio_mode->sample_size;
+
+				set_reg_field_value(value,
+						sample_rates.all,
+						AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+						SUPPORTED_FREQUENCIES_STEREO);
+				}
+				break;
+			case AUDIO_FORMAT_CODE_AC3:
+				is_ac3_supported = true;
+				break;
+			case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS:
+			case AUDIO_FORMAT_CODE_DTS_HD:
+			case AUDIO_FORMAT_CODE_MAT_MLP:
+			case AUDIO_FORMAT_CODE_DST:
+			case AUDIO_FORMAT_CODE_WMAPRO:
+				byte2 = audio_mode->vendor_specific;
+				break;
+			default:
+				break;
+			}
+
+			/* fill audio format data */
+			set_reg_field_value(value,
+					audio_mode->channel_count - 1,
+					AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+					MAX_CHANNELS);
+
+			set_reg_field_value(value,
+					sample_rates.all,
+					AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+					SUPPORTED_FREQUENCIES);
+
+			set_reg_field_value(value,
+					byte2,
+					AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+					DESCRIPTOR_BYTE_2);
+		} /* if */
+
+		AZ_REG_WRITE(
+				AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index,
+				value);
+	} /* for */
+
+	if (is_ac3_supported)
+		/* todo: this register is global; why program a global register? */
+		REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS,
+				0x05);
+
+	/* check for 192khz/8-Ch support for HBR requirements */
+	sample_rate.all = 0;
+	sample_rate.rate.RATE_192 = 1;
+
+	check_audio_bandwidth(
+		crtc_info,
+		8,
+		signal,
+		&sample_rate);
+
+	set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192);
+
+	/* Audio and Video Lipsync */
+	set_video_latency(audio, audio_info->video_latency);
+	set_audio_latency(audio, audio_info->audio_latency);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->manufacture_id,
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+		MANUFACTURER_ID);
+
+	set_reg_field_value(value, audio_info->product_id,
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+		PRODUCT_ID);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+		value);
+
+	value = 0;
+
+	/* get display name string length */
+	while (audio_info->display_name[name_len++] != '\0') {
+		if (name_len >=
+		    MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS)
+			break;
+	}
+	set_reg_field_value(value, name_len,
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+		SINK_DESCRIPTION_LEN);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+		value);
+
+	/*
+	 * write the port ID:
+	 * PORT_ID0 = display index
+	 * PORT_ID1 = 16-bit BDF
+	 * (format MSB->LSB: 8-bit Bus, 5-bit Device, 3-bit Function)
+	 */
+
+	value = 0;
+
+	set_reg_field_value(value, audio_info->port_id[0],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2,
+		PORT_ID0);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->port_id[1],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3,
+		PORT_ID1);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value);
+
+	/* write the 18-char monitor string */
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[0],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION0);
+
+	set_reg_field_value(value, audio_info->display_name[1],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION1);
+
+	set_reg_field_value(value, audio_info->display_name[2],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION2);
+
+	set_reg_field_value(value, audio_info->display_name[3],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION3);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[4],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION4);
+
+	set_reg_field_value(value, audio_info->display_name[5],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION5);
+
+	set_reg_field_value(value, audio_info->display_name[6],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION6);
+
+	set_reg_field_value(value, audio_info->display_name[7],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION7);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[8],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION8);
+
+	set_reg_field_value(value, audio_info->display_name[9],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION9);
+
+	set_reg_field_value(value, audio_info->display_name[10],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION10);
+
+	set_reg_field_value(value, audio_info->display_name[11],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION11);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[12],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION12);
+
+	set_reg_field_value(value, audio_info->display_name[13],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION13);
+
+	set_reg_field_value(value, audio_info->display_name[14],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION14);
+
+	set_reg_field_value(value, audio_info->display_name[15],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION15);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[16],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+		DESCRIPTION16);
+
+	set_reg_field_value(value, audio_info->display_name[17],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+		DESCRIPTION17);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
+}
+
+/*
+ * todo: wall clock related functionality probably belongs to clock_src.
+ */
+
+/* search pixel clock value for Azalia HDMI Audio */
+static bool get_azalia_clock_info_hdmi(
+	uint32_t crtc_pixel_clock_in_khz,
+	uint32_t actual_pixel_clock_in_khz,
+	struct azalia_clock_info *azalia_clock_info)
+{
+	if (azalia_clock_info == NULL)
+		return false;
+
+	/* audio_dto_phase= 24 * 10,000;
+	 *   24MHz in [100Hz] units */
+	azalia_clock_info->audio_dto_phase =
+			24 * 10000;
+
+	/* audio_dto_module = PCLKFrequency * 10,000;
+	 *  [khz] -> [100Hz] */
+	azalia_clock_info->audio_dto_module =
+			actual_pixel_clock_in_khz * 10;
+
+	return true;
+}
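Worked example for the HDMI path: with a 148500 kHz actual pixel clock (1080p60), audio_dto_phase = 240000 and audio_dto_module = 1485000, both in 100 Hz units, so the wall clock DTO output is 148500 kHz * 240000 / 1485000 = 24000 kHz, i.e. the 24 MHz reference the comments above describe.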
+
+static bool get_azalia_clock_info_dp(
+	uint32_t requested_pixel_clock_in_khz,
+	const struct audio_pll_info *pll_info,
+	struct azalia_clock_info *azalia_clock_info)
+{
+	if (pll_info == NULL || azalia_clock_info == NULL)
+		return false;
+
+	/* Reported dpDtoSourceClockInkhz value for
+	 * DCE8 already adjusted for SS, do not need any
+	 * adjustment here anymore
+	 */
+
+	/*audio_dto_phase = 24 * 10,000;
+	 * 24MHz in [100Hz] units */
+	azalia_clock_info->audio_dto_phase = 24 * 10000;
+
+	/*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
+	 *  [khz] ->[100Hz] */
+	azalia_clock_info->audio_dto_module =
+		pll_info->dp_dto_source_clock_in_khz * 10;
+
+	return true;
+}
+
+void dce_aud_wall_dto_setup(
+	struct audio *audio,
+	enum signal_type signal,
+	const struct audio_crtc_info *crtc_info,
+	const struct audio_pll_info *pll_info)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	struct azalia_clock_info clock_info = { 0 };
+
+	if (dc_is_hdmi_signal(signal)) {
+		uint32_t src_sel;
+
+		/* DTO0 programming goal:
+		 * - generate 24 MHz, 128*Fs from 24 MHz
+		 * - use DTO0 when an active HDMI port is connected
+		 *   (optionally a DP is connected) */
+
+		/* calculate DTO settings */
+		get_azalia_clock_info_hdmi(
+			crtc_info->requested_pixel_clock,
+			crtc_info->calculated_pixel_clock,
+			&clock_info);
+
+		/* On TN/SI, program DTO source select and DTO select before
+		 * programming DTO modulo and DTO phase. These bits must be
+		 * programmed first, otherwise there will be no HDMI audio at
+		 * boot up. This is a HW sequence change (different from old
+		 * ASICs). Use caution when changing this programming sequence.
+		 *
+		 * HDMI enabled, using DTO0;
+		 * program master CRTC for DTO0 */
+		src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
+		REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
+			DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
+			DCCG_AUDIO_DTO_SEL, 0);
+
+		/* module */
+		REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
+			DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
+
+		/* phase */
+		REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
+			DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
+	} else {
+		/* DTO1 programming goal:
+		 * - generate 24 MHz, 512*Fs, 128*Fs from 24 MHz
+		 * - default is to use DTO1; switch to DTO0 when an audio
+		 *   master HDMI port is connected
+		 * - use as default for DP
+		 *
+		 * calculate DTO settings */
+		get_azalia_clock_info_dp(
+			crtc_info->requested_pixel_clock,
+			pll_info,
+			&clock_info);
+
+		/* Program DTO select before programming DTO modulo and DTO
+		 * phase. Default to DTO1. */
+		REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+				DCCG_AUDIO_DTO_SEL, 1);
+			/* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
+			 * Select 512fs for DP TODO: web register definition
+			 * does not match register header file
+			 * DCE11 version it's commented out while DCE8 it's set to 1
+			*/
+
+		/* module */
+		REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
+				DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
+
+		/* phase */
+		REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
+				DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
+
+		/* DAL2 code separated DCCG_AUDIO_DTO_SEL and
+		 * DCCG_AUDIO_DTO2_USE_512FBR_DTO programming into two
+		 * different locations; merging them should not hurt */
+		/*value.bits.DCCG_AUDIO_DTO2_USE_512FBR_DTO = 1;
+		dal_write_reg(mmDCCG_AUDIO_DTO_SOURCE, value);*/
+	}
+}
+
+bool dce_aud_endpoint_valid(
+		struct audio *audio)
+{
+	uint32_t value;
+	uint32_t port_connectivity;
+
+	value = AZ_REG_READ(
+			AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+
+	port_connectivity = get_reg_field_value(value,
+			AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+			PORT_CONNECTIVITY);
+
+	return !(port_connectivity == 1);
+}
+
+/* initialize HW state */
+void dce_aud_hw_init(
+		struct audio *audio)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	/* we only need to program the following registers once, so we only do
+	 * it for inst 0 */
+	if (audio->inst != 0)
+		return;
+
+	/* Support R5 - 32 kHz
+	 * Support R6 - 44.1 kHz
+	 * Support R7 - 48 kHz
+	 */
+	REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES,
+			AUDIO_RATE_CAPABILITIES, 0x70);
+
+	/*Keep alive bit to verify HW block in BU. */
+	REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
+			CLKSTOP, 1,
+			EPSS, 1);
+}
+
+static const struct audio_funcs funcs = {
+	.endpoint_valid = dce_aud_endpoint_valid,
+	.hw_init = dce_aud_hw_init,
+	.wall_dto_setup = dce_aud_wall_dto_setup,
+	.az_enable = dce_aud_az_enable,
+	.az_disable = dce_aud_az_disable,
+	.az_configure = dce_aud_az_configure,
+	.destroy = dce_aud_destroy,
+};
+
+void dce_aud_destroy(struct audio **audio)
+{
+	dm_free(*audio);
+	*audio = NULL;
+}
+
+struct audio *dce_audio_create(
+		struct dc_context *ctx,
+		unsigned int inst,
+		const struct dce_audio_registers *reg,
+		const struct dce_audio_shift *shifts,
+		const struct dce_aduio_mask *masks
+		)
+{
+	struct dce_audio *audio = dm_alloc(sizeof(*audio));
+
+	if (audio == NULL) {
+		ASSERT_CRITICAL(audio);
+		return NULL;
+	}
+
+	audio->base.ctx = ctx;
+	audio->base.inst = inst;
+	audio->base.funcs = &funcs;
+
+	audio->regs = reg;
+	audio->shifts = shifts;
+	audio->masks = masks;
+
+	return &audio->base;
+}
+

+ 145 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_audio.h

@@ -0,0 +1,145 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_AUDIO_DCE_110_H__
+#define __DAL_AUDIO_DCE_110_H__
+
+#include "audio.h"
+
+#define AUD_COMMON_REG_LIST(id)\
+	SRI(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id),\
+	SRI(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id),\
+	SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS),\
+	SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES),\
+	SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES),\
+	SR(DCCG_AUDIO_DTO_SOURCE),\
+	SR(DCCG_AUDIO_DTO0_MODULE),\
+	SR(DCCG_AUDIO_DTO0_PHASE),\
+	SR(DCCG_AUDIO_DTO1_MODULE),\
+	SR(DCCG_AUDIO_DTO1_PHASE)
+
+
+ /* set field name */
+#define SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)\
+		SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+		SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+		SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\
+		SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\
+		SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\
+		SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\
+		SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\
+		SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\
+		SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh)
+
+#define AUD_COMMON_MASK_SH_LIST(mask_sh)\
+		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh),\
+		SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+		SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
+
+
+struct dce_audio_registers {
+	uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX;
+	uint32_t AZALIA_F0_CODEC_ENDPOINT_DATA;
+
+	uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS;
+	uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES;
+	uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES;
+
+	uint32_t DCCG_AUDIO_DTO_SOURCE;
+	uint32_t DCCG_AUDIO_DTO0_MODULE;
+	uint32_t DCCG_AUDIO_DTO0_PHASE;
+	uint32_t DCCG_AUDIO_DTO1_MODULE;
+	uint32_t DCCG_AUDIO_DTO1_PHASE;
+
+	uint32_t AUDIO_RATE_CAPABILITIES;
+};
+
+struct dce_audio_shift {
+	uint8_t AZALIA_ENDPOINT_REG_INDEX;
+	uint8_t AZALIA_ENDPOINT_REG_DATA;
+
+	uint8_t AUDIO_RATE_CAPABILITIES;
+	uint8_t CLKSTOP;
+	uint8_t EPSS;
+
+	uint8_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+	uint8_t DCCG_AUDIO_DTO_SEL;
+	uint8_t DCCG_AUDIO_DTO0_MODULE;
+	uint8_t DCCG_AUDIO_DTO0_PHASE;
+	uint8_t DCCG_AUDIO_DTO1_MODULE;
+	uint8_t DCCG_AUDIO_DTO1_PHASE;
+};
+
+struct dce_audio_mask {
+	uint32_t AZALIA_ENDPOINT_REG_INDEX;
+	uint32_t AZALIA_ENDPOINT_REG_DATA;
+
+	uint32_t AUDIO_RATE_CAPABILITIES;
+	uint32_t CLKSTOP;
+	uint32_t EPSS;
+
+	uint32_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+	uint32_t DCCG_AUDIO_DTO_SEL;
+	uint32_t DCCG_AUDIO_DTO0_MODULE;
+	uint32_t DCCG_AUDIO_DTO0_PHASE;
+	uint32_t DCCG_AUDIO_DTO1_MODULE;
+	uint32_t DCCG_AUDIO_DTO1_PHASE;
+};
+
+struct dce_audio {
+	struct audio base;
+	const struct dce_audio_registers *regs;
+	const struct dce_audio_shift *shifts;
+	const struct dce_audio_mask *masks;
+};
+
+struct audio *dce_audio_create(
+		struct dc_context *ctx,
+		unsigned int inst,
+		const struct dce_audio_registers *reg,
+		const struct dce_audio_shift *shifts,
+		const struct dce_audio_mask *masks);
+
+void dce_aud_destroy(struct audio **audio);
+
+void dce_aud_hw_init(struct audio *audio);
+
+void dce_aud_az_enable(struct audio *audio);
+void dce_aud_az_disable(struct audio *audio);
+
+void dce_aud_az_configure(struct audio *audio,
+	enum signal_type signal,
+	const struct audio_crtc_info *crtc_info,
+	const struct audio_info *audio_info);
+
+void dce_aud_wall_dto_setup(struct audio *audio,
+	enum signal_type signal,
+	const struct audio_crtc_info *crtc_info,
+	const struct audio_pll_info *pll_info);
+
+#endif   /*__DAL_AUDIO_DCE_110_H__*/

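The SF() entries above are designated initializers keyed by field name, which is what lets one field list fill both the shift and the mask table. A sketch of the usual instantiation, assuming the reg__field__SHIFT and reg__field_MASK constants come from the ASIC's generated register headers:

/* SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, __SHIFT)
 * expands to the designated initializer:
 *   .CLKSTOP = AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT
 */
static const struct dce_audio_shift audio_shifts = {
	AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_audio_mask audio_masks = {
	AUD_COMMON_MASK_SH_LIST(_MASK)
};
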
+ 1264 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c

@@ -0,0 +1,1264 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "dc_types.h"
+#include "core_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+
+#include "dce_clock_source.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+	(clk_src->regs->reg)
+
+#define CTX \
+	clk_src->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+	clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
+
+#define FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM 6
+#define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1
+#define MAX_PLL_CALC_ERROR 0xFFFFFFFF
+
+static const struct spread_spectrum_data *get_ss_data_entry(
+		struct dce110_clk_src *clk_src,
+		enum signal_type signal,
+		uint32_t pix_clk_khz)
+{
+	uint32_t entries_num;
+	uint32_t i;
+	struct spread_spectrum_data *ss_parm = NULL;
+	struct spread_spectrum_data *ret = NULL;
+
+	switch (signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		ss_parm = clk_src->dvi_ss_params;
+		entries_num = clk_src->dvi_ss_params_cnt;
+		break;
+
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		ss_parm = clk_src->hdmi_ss_params;
+		entries_num = clk_src->hdmi_ss_params_cnt;
+		break;
+
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_VIRTUAL:
+		ss_parm = clk_src->dp_ss_params;
+		entries_num = clk_src->dp_ss_params_cnt;
+		break;
+
+	default:
+		ss_parm = NULL;
+		entries_num = 0;
+		break;
+	}
+
+	if (ss_parm == NULL)
+		return ret;
+
+	for (i = 0; i < entries_num; ++i, ++ss_parm) {
+		if (ss_parm->freq_range_khz >= pix_clk_khz) {
+			ret = ss_parm;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * calculate_fb_and_fractional_fb_divider
+ *
+ * DESCRIPTION: Calculates feedback and fractional feedback divider values
+ *
+ * PARAMETERS:
+ * target_pix_clk_khz           Desired frequency in kHz
+ * ref_divider                  Reference divider (already known)
+ * post_divider                 Post divider (already known)
+ * feedback_divider_param       Pointer where to store the
+ *                              calculated feedback divider value
+ * fract_feedback_divider_param Pointer where to store the
+ *                              calculated fractional feedback divider value
+ *
+ * RETURNS:
+ * Fills the locations pointed to by feedback_divider_param
+ * and fract_feedback_divider_param.
+ * Returns	- true if the feedback divider is non-zero
+ *		- false otherwise (should never happen)
+ */
+static bool calculate_fb_and_fractional_fb_divider(
+		struct calc_pll_clock_source *calc_pll_cs,
+		uint32_t target_pix_clk_khz,
+		uint32_t ref_divider,
+		uint32_t post_divider,
+		uint32_t *feedback_divider_param,
+		uint32_t *fract_feedback_divider_param)
+{
+	uint64_t feedback_divider;
+
+	feedback_divider =
+		(uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+	feedback_divider *= 10;
+	/* additional factor, since we divide by 10 afterwards */
+	feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
+	feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz);
+
+/* Round to the configured precision.
+ * This generalizes the old code's (feedback_divider + 5) / 10:
+ * for example, if the difference between the number of fractional feedback
+ * decimal points and the fractional FB divider precision is 2, the
+ * equation becomes (feedback_divider + 5 * 100) / (10 * 100). */
+
+	feedback_divider += (uint64_t)
+			(5 * calc_pll_cs->fract_fb_divider_precision_factor);
+	feedback_divider =
+		div_u64(feedback_divider,
+			calc_pll_cs->fract_fb_divider_precision_factor * 10);
+	feedback_divider *= (uint64_t)
+			(calc_pll_cs->fract_fb_divider_precision_factor);
+
+	*feedback_divider_param =
+		div_u64_rem(
+			feedback_divider,
+			calc_pll_cs->fract_fb_divider_factor,
+			fract_feedback_divider_param);
+
+	if (*feedback_divider_param != 0)
+		return true;
+	return false;
+}
+
+/**
+ * calc_fb_divider_checking_tolerance
+ *
+ * DESCRIPTION: Calculates feedback and fractional feedback divider values
+ *		for the passed reference and post divider, checking for tolerance.
+ * PARAMETERS:
+ * pll_settings		Pointer to the settings structure
+ * ref_divider		Reference divider (already known)
+ * post_divider	Post divider (already known)
+ * tolerance		Tolerance the calculated pixel clock must be within
+ *
+ * RETURNS:
+ * Fills the pll_settings structure with PLL divider values
+ * if the calculated values are within the required tolerance.
+ * Returns	- true if the error is within tolerance
+ *		- false if the error is not within tolerance
+ */
+static bool calc_fb_divider_checking_tolerance(
+		struct calc_pll_clock_source *calc_pll_cs,
+		struct pll_settings *pll_settings,
+		uint32_t ref_divider,
+		uint32_t post_divider,
+		uint32_t tolerance)
+{
+	uint32_t feedback_divider;
+	uint32_t fract_feedback_divider;
+	uint32_t actual_calculated_clock_khz;
+	uint32_t abs_err;
+	uint64_t actual_calc_clk_khz;
+
+	calculate_fb_and_fractional_fb_divider(
+			calc_pll_cs,
+			pll_settings->adjusted_pix_clk,
+			ref_divider,
+			post_divider,
+			&feedback_divider,
+			&fract_feedback_divider);
+
+	/*Actual calculated value*/
+	actual_calc_clk_khz = (uint64_t)(feedback_divider *
+					calc_pll_cs->fract_fb_divider_factor) +
+							fract_feedback_divider;
+	actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
+	actual_calc_clk_khz =
+		div_u64(actual_calc_clk_khz,
+			ref_divider * post_divider *
+				calc_pll_cs->fract_fb_divider_factor);
+
+	actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz);
+
+	abs_err = (actual_calculated_clock_khz >
+					pll_settings->adjusted_pix_clk)
+			? actual_calculated_clock_khz -
+					pll_settings->adjusted_pix_clk
+			: pll_settings->adjusted_pix_clk -
+						actual_calculated_clock_khz;
+
+	if (abs_err <= tolerance) {
+		/*found good values*/
+		pll_settings->reference_freq = calc_pll_cs->ref_freq_khz;
+		pll_settings->reference_divider = ref_divider;
+		pll_settings->feedback_divider = feedback_divider;
+		pll_settings->fract_feedback_divider = fract_feedback_divider;
+		pll_settings->pix_clk_post_divider = post_divider;
+		pll_settings->calculated_pix_clk =
+			actual_calculated_clock_khz;
+		pll_settings->vco_freq =
+			actual_calculated_clock_khz * post_divider;
+		return true;
+	}
+	return false;
+}
+
+static bool calc_pll_dividers_in_range(
+		struct calc_pll_clock_source *calc_pll_cs,
+		struct pll_settings *pll_settings,
+		uint32_t min_ref_divider,
+		uint32_t max_ref_divider,
+		uint32_t min_post_divider,
+		uint32_t max_post_divider,
+		uint32_t err_tolerance)
+{
+	uint32_t ref_divider;
+	uint32_t post_divider;
+	uint32_t tolerance;
+
+/* err_tolerance is in units of 0.01%:
+ * e.g. 25 -> 25 / 10000 = 0.0025, an acceptable error of 0.25%;
+ *       1 ->  1 / 10000 = 0.0001, an acceptable error of 0.01% */
+	tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) /
+									10000;
+	if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE)
+		tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE;
+
+	for (
+			post_divider = max_post_divider;
+			post_divider >= min_post_divider;
+			--post_divider) {
+		for (
+				ref_divider = min_ref_divider;
+				ref_divider <= max_ref_divider;
+				++ref_divider) {
+			if (calc_fb_divider_checking_tolerance(
+					calc_pll_cs,
+					pll_settings,
+					ref_divider,
+					post_divider,
+					tolerance)) {
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static uint32_t calculate_pixel_clock_pll_dividers(
+		struct calc_pll_clock_source *calc_pll_cs,
+		struct pll_settings *pll_settings)
+{
+	uint32_t err_tolerance;
+	uint32_t min_post_divider;
+	uint32_t max_post_divider;
+	uint32_t min_ref_divider;
+	uint32_t max_ref_divider;
+
+	if (pll_settings->adjusted_pix_clk == 0) {
+		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+			"%s Bad requested pixel clock", __func__);
+		return MAX_PLL_CALC_ERROR;
+	}
+
+/* 1) Find Post divider ranges */
+	if (pll_settings->pix_clk_post_divider) {
+		min_post_divider = pll_settings->pix_clk_post_divider;
+		max_post_divider = pll_settings->pix_clk_post_divider;
+	} else {
+		min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider;
+		if (min_post_divider * pll_settings->adjusted_pix_clk <
+						calc_pll_cs->min_vco_khz) {
+			min_post_divider = calc_pll_cs->min_vco_khz /
+					pll_settings->adjusted_pix_clk;
+			if ((min_post_divider *
+					pll_settings->adjusted_pix_clk) <
+						calc_pll_cs->min_vco_khz)
+				min_post_divider++;
+		}
+
+		max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider;
+		if (max_post_divider * pll_settings->adjusted_pix_clk
+				> calc_pll_cs->max_vco_khz)
+			max_post_divider = calc_pll_cs->max_vco_khz /
+					pll_settings->adjusted_pix_clk;
+	}
+
+/* 2) Find Reference divider ranges
+ * When SS is enabled, or for Display Port even without SS,
+ * pll_settings->reference_divider is not zero.
+ * So calculate the PPLL FB and fractional FB divider
+ * using the passed reference divider. */
+
+	if (pll_settings->reference_divider) {
+		min_ref_divider = pll_settings->reference_divider;
+		max_ref_divider = pll_settings->reference_divider;
+	} else {
+		min_ref_divider = ((calc_pll_cs->ref_freq_khz
+				/ calc_pll_cs->max_pll_input_freq_khz)
+				> calc_pll_cs->min_pll_ref_divider)
+			? calc_pll_cs->ref_freq_khz
+					/ calc_pll_cs->max_pll_input_freq_khz
+			: calc_pll_cs->min_pll_ref_divider;
+
+		max_ref_divider = ((calc_pll_cs->ref_freq_khz
+				/ calc_pll_cs->min_pll_input_freq_khz)
+				< calc_pll_cs->max_pll_ref_divider)
+			? calc_pll_cs->ref_freq_khz /
+					calc_pll_cs->min_pll_input_freq_khz
+			: calc_pll_cs->max_pll_ref_divider;
+	}
+
+/* If some parameters are invalid we could have a scenario where "min" > "max",
+ * which would produce an endless loop later.
+ * We should investigate why we get the wrong parameters.
+ * But to follow the same logic used when "adjusted_pix_clk" is 0,
+ * it is better to return here than cause a system hang/watchdog timeout later.
+ *  ## SVS Wed 15 Jul 2009 */
+
+	if (min_post_divider > max_post_divider) {
+		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+			"%s Post divider range is invalid", __func__);
+		return MAX_PLL_CALC_ERROR;
+	}
+
+	if (min_ref_divider > max_ref_divider) {
+		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+			"%s Reference divider range is invalid", __func__);
+		return MAX_PLL_CALC_ERROR;
+	}
+
+/* 3) Try to find PLL dividers given ranges
+ * starting with minimal error tolerance.
+ * Increase error tolerance until PLL dividers found*/
+	err_tolerance = MAX_PLL_CALC_ERROR;
+
+	while (!calc_pll_dividers_in_range(
+			calc_pll_cs,
+			pll_settings,
+			min_ref_divider,
+			max_ref_divider,
+			min_post_divider,
+			max_post_divider,
+			err_tolerance))
+		err_tolerance += (err_tolerance > 10)
+				? (err_tolerance / 10)
+				: 1;
+
+	return err_tolerance;
+}
+
+static bool pll_adjust_pix_clk(
+		struct dce110_clk_src *clk_src,
+		struct pixel_clk_params *pix_clk_params,
+		struct pll_settings *pll_settings)
+{
+	uint32_t actual_pix_clk_khz = 0;
+	uint32_t requested_clk_khz = 0;
+	struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = {
+							0 };
+	enum bp_result bp_result;
+
+	switch (pix_clk_params->signal_type) {
+	case SIGNAL_TYPE_HDMI_TYPE_A: {
+		requested_clk_khz = pix_clk_params->requested_pix_clk;
+
+		switch (pix_clk_params->color_depth) {
+		case COLOR_DEPTH_101010:
+			requested_clk_khz = (requested_clk_khz * 5) >> 2;
+			break; /* x1.25*/
+		case COLOR_DEPTH_121212:
+			requested_clk_khz = (requested_clk_khz * 6) >> 2;
+			break; /* x1.5*/
+		case COLOR_DEPTH_161616:
+			requested_clk_khz = requested_clk_khz * 2;
+			break; /* x2.0*/
+		default:
+			break;
+		}
+
+		actual_pix_clk_khz = requested_clk_khz;
+	}
+		break;
+
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_EDP:
+		requested_clk_khz = pix_clk_params->requested_sym_clk;
+		actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+		break;
+
+	default:
+		requested_clk_khz = pix_clk_params->requested_pix_clk;
+		actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+		break;
+	}
+
+	bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz;
+	bp_adjust_pixel_clock_params.encoder_object_id =
+			pix_clk_params->encoder_object_id;
+	bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type;
+	bp_adjust_pixel_clock_params.ss_enable =
+			pix_clk_params->flags.ENABLE_SS;
+	bp_result = clk_src->bios->funcs->adjust_pixel_clock(
+			clk_src->bios, &bp_adjust_pixel_clock_params);
+	if (bp_result == BP_RESULT_OK) {
+		pll_settings->actual_pix_clk = actual_pix_clk_khz;
+		pll_settings->adjusted_pix_clk =
+			bp_adjust_pixel_clock_params.adjusted_pixel_clock;
+		pll_settings->reference_divider =
+			bp_adjust_pixel_clock_params.reference_divider;
+		pll_settings->pix_clk_post_divider =
+			bp_adjust_pixel_clock_params.pixel_clock_post_divider;
+
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * Calculate PLL Dividers for given Clock Value.
+ * First will call VBIOS Adjust Exec table to check if requested Pixel clock
+ * will be Adjusted based on usage.
+ * Then it will calculate PLL Dividers for this Adjusted clock using preferred
+ * method (Maximum VCO frequency).
+ *
+ * \return
+ *     Calculation error in units of 0.01%
+ */
+
+static uint32_t dce110_get_pix_clk_dividers_helper(
+		struct dce110_clk_src *clk_src,
+		struct pll_settings *pll_settings,
+		struct pixel_clk_params *pix_clk_params)
+{
+	uint32_t field = 0;
+	uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+	/* Check if the reference clock is external (not pcie/xtalin).
+	 * HW Dce80 spec:
+	 * 00 - PCIE_REFCLK, 01 - XTALIN,    02 - GENERICA,    03 - GENERICB
+	 * 04 - HSYNCA,      05 - GENLK_CLK, 06 - PCIE_REFCLK, 07 - DVOCLK0 */
+	REG_GET(PLL_CNTL, PLL_REF_DIV_SRC, &field);
+	pll_settings->use_external_clk = (field > 1);
+
+	/* VBIOS by default enables DP SS (spread on IDCLK) for DCE 8.0 always
+	 * (we do not care any more from SI for some older DP Sink which
+	 * does not report SS support, no known issues) */
+	if ((pix_clk_params->flags.ENABLE_SS) ||
+			(dc_is_dp_signal(pix_clk_params->signal_type))) {
+
+		const struct spread_spectrum_data *ss_data = get_ss_data_entry(
+					clk_src,
+					pix_clk_params->signal_type,
+					pll_settings->adjusted_pix_clk);
+
+		if (ss_data != NULL)
+			pll_settings->ss_percentage = ss_data->percentage;
+	}
+
+	/* Check VBIOS AdjustPixelClock Exec table */
+	if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
+		/* Should never happen, ASSERT and fill up values to be able
+		 * to continue. */
+		dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+			"%s: Failed to adjust pixel clock!!", __func__);
+		pll_settings->actual_pix_clk =
+				pix_clk_params->requested_pix_clk;
+		pll_settings->adjusted_pix_clk =
+				pix_clk_params->requested_pix_clk;
+
+		if (dc_is_dp_signal(pix_clk_params->signal_type))
+			pll_settings->adjusted_pix_clk = 100000;
+	}
+
+	/* Calculate Dividers */
+	if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+		/*Calculate Dividers by HDMI object, no SS case or SS case */
+		pll_calc_error =
+			calculate_pixel_clock_pll_dividers(
+					&clk_src->calc_pll_hdmi,
+					pll_settings);
+	else
+		/*Calculate Dividers by default object, no SS case or SS case */
+		pll_calc_error =
+			calculate_pixel_clock_pll_dividers(
+					&clk_src->calc_pll,
+					pll_settings);
+
+	return pll_calc_error;
+}
+
+static void dce112_get_pix_clk_dividers_helper(
+		struct dce110_clk_src *clk_src,
+		struct pll_settings *pll_settings,
+		struct pixel_clk_params *pix_clk_params)
+{
+	uint32_t actual_pixel_clock_khz;
+
+	actual_pixel_clock_khz = pix_clk_params->requested_pix_clk;
+	/* Calculate Dividers */
+	if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+		switch (pix_clk_params->color_depth) {
+		case COLOR_DEPTH_101010:
+			actual_pixel_clock_khz = (actual_pixel_clock_khz * 5) >> 2;
+			break;
+		case COLOR_DEPTH_121212:
+			actual_pixel_clock_khz = (actual_pixel_clock_khz * 6) >> 2;
+			break;
+		case COLOR_DEPTH_161616:
+			actual_pixel_clock_khz = actual_pixel_clock_khz * 2;
+			break;
+		default:
+			break;
+		}
+	}
+	pll_settings->actual_pix_clk = actual_pixel_clock_khz;
+	pll_settings->adjusted_pix_clk = actual_pixel_clock_khz;
+	pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk;
+}
+
+static uint32_t dce110_get_pix_clk_dividers(
+		struct clock_source *cs,
+		struct pixel_clk_params *pix_clk_params,
+		struct pll_settings *pll_settings)
+{
+	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+	uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+	if (pix_clk_params == NULL || pll_settings == NULL
+			|| pix_clk_params->requested_pix_clk == 0) {
+		dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+			"%s: Invalid parameters!!\n", __func__);
+		return pll_calc_error;
+	}
+
+	memset(pll_settings, 0, sizeof(*pll_settings));
+
+	if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
+			cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
+		pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz;
+		pll_settings->calculated_pix_clk = clk_src->ext_clk_khz;
+		pll_settings->actual_pix_clk =
+					pix_clk_params->requested_pix_clk;
+		return 0;
+	}
+
+	switch (cs->ctx->dce_version) {
+	case DCE_VERSION_8_0:
+	case DCE_VERSION_10_0:
+	case DCE_VERSION_11_0:
+		pll_calc_error =
+			dce110_get_pix_clk_dividers_helper(clk_src,
+			pll_settings, pix_clk_params);
+		break;
+	case DCE_VERSION_11_2:
+		dce112_get_pix_clk_dividers_helper(clk_src,
+				pll_settings, pix_clk_params);
+		break;
+	default:
+		break;
+	}
+
+	return pll_calc_error;
+}
+
+static bool disable_spread_spectrum(struct dce110_clk_src *clk_src)
+{
+	enum bp_result result;
+	struct bp_spread_spectrum_parameters bp_ss_params = {0};
+
+	bp_ss_params.pll_id = clk_src->base.id;
+
+	/*Call ASICControl to process ATOMBIOS Exec table*/
+	result = clk_src->bios->funcs->enable_spread_spectrum_on_ppll(
+			clk_src->bios,
+			&bp_ss_params,
+			false);
+
+	return result == BP_RESULT_OK;
+}
+
+static bool calculate_ss(
+		const struct pll_settings *pll_settings,
+		const struct spread_spectrum_data *ss_data,
+		struct delta_sigma_data *ds_data)
+{
+	struct fixed32_32 fb_div;
+	struct fixed32_32 ss_amount;
+	struct fixed32_32 ss_nslip_amount;
+	struct fixed32_32 ss_ds_frac_amount;
+	struct fixed32_32 ss_step_size;
+	struct fixed32_32 modulation_time;
+
+	if (ds_data == NULL)
+		return false;
+	if (ss_data == NULL)
+		return false;
+	if (ss_data->percentage == 0)
+		return false;
+	if (pll_settings == NULL)
+		return false;
+
+	memset(ds_data, 0, sizeof(struct delta_sigma_data));
+
+	/* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
+	/* 6 decimal point support in fractional feedback divider */
+	fb_div  = dal_fixed32_32_from_fraction(
+		pll_settings->fract_feedback_divider, 1000000);
+	fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
+
+	ds_data->ds_frac_amount = 0;
+	/* The spread spectrum percentage is in units of 0.01%,
+	 * so it has to be divided by 100 * 100 */
+	ss_amount = dal_fixed32_32_mul(
+		fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
+					100 * ss_data->percentage_divider));
+	ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
+
+	ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
+		dal_fixed32_32_from_int(ds_data->feedback_amount));
+	ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
+	ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
+
+	ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
+		dal_fixed32_32_from_int(ds_data->nfrac_amount));
+	ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
+	ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
+
+	/* compute SS_STEP_SIZE_DSFRAC */
+	modulation_time = dal_fixed32_32_from_fraction(
+		pll_settings->reference_freq * 1000,
+		pll_settings->reference_divider * ss_data->modulation_freq_hz);
+
+	if (ss_data->flags.CENTER_SPREAD)
+		modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
+	else
+		modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
+
+	ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
+	/* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
+	ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
+	ds_data->ds_frac_size =  dal_fixed32_32_floor(ss_step_size);
+
+	return true;
+}
+
+static bool enable_spread_spectrum(
+		struct dce110_clk_src *clk_src,
+		enum signal_type signal, struct pll_settings *pll_settings)
+{
+	struct bp_spread_spectrum_parameters bp_params = {0};
+	struct delta_sigma_data d_s_data;
+	const struct spread_spectrum_data *ss_data = NULL;
+
+	ss_data = get_ss_data_entry(
+			clk_src,
+			signal,
+			pll_settings->calculated_pix_clk);
+
+/* The pixel clock PLL has been programmed to generate the desired pixel clock;
+ * now enable SS on the pixel clock. */
+/* TODO: is it OK to return true without doing anything? */
+	if (ss_data != NULL && pll_settings->ss_percentage != 0) {
+		if (calculate_ss(pll_settings, ss_data, &d_s_data)) {
+			bp_params.ds.feedback_amount =
+					d_s_data.feedback_amount;
+			bp_params.ds.nfrac_amount =
+					d_s_data.nfrac_amount;
+			bp_params.ds.ds_frac_size = d_s_data.ds_frac_size;
+			bp_params.ds_frac_amount =
+					d_s_data.ds_frac_amount;
+			bp_params.flags.DS_TYPE = 1;
+			bp_params.pll_id = clk_src->base.id;
+			bp_params.percentage = ss_data->percentage;
+			if (ss_data->flags.CENTER_SPREAD)
+				bp_params.flags.CENTER_SPREAD = 1;
+			if (ss_data->flags.EXTERNAL_SS)
+				bp_params.flags.EXTERNAL_SS = 1;
+
+			if (BP_RESULT_OK !=
+				clk_src->bios->funcs->
+					enable_spread_spectrum_on_ppll(
+							clk_src->bios,
+							&bp_params,
+							true))
+				return false;
+		} else
+			return false;
+	}
+	return true;
+}
+
+static void dce110_program_pixel_clk_resync(
+		struct dce110_clk_src *clk_src,
+		enum signal_type signal_type,
+		enum dc_color_depth colordepth)
+{
+	REG_UPDATE(RESYNC_CNTL,
+			DCCG_DEEP_COLOR_CNTL1, 0);
+	/*
+	 * 24 bit mode: TMDS clock = 1.0 x pixel clock  (1:1)
+	 * 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+	 * 36 bit mode: TMDS clock = 1.5 x pixel clock  (3:2)
+	 * 48 bit mode: TMDS clock = 2 x pixel clock    (2:1)
+	 */
+	if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
+		return;
+
+	switch (colordepth) {
+	case COLOR_DEPTH_888:
+		REG_UPDATE(RESYNC_CNTL,
+				DCCG_DEEP_COLOR_CNTL1, 0);
+		break;
+	case COLOR_DEPTH_101010:
+		REG_UPDATE(RESYNC_CNTL,
+				DCCG_DEEP_COLOR_CNTL1, 1);
+		break;
+	case COLOR_DEPTH_121212:
+		REG_UPDATE(RESYNC_CNTL,
+				DCCG_DEEP_COLOR_CNTL1, 2);
+		break;
+	case COLOR_DEPTH_161616:
+		REG_UPDATE(RESYNC_CNTL,
+				DCCG_DEEP_COLOR_CNTL1, 3);
+		break;
+	default:
+		break;
+	}
+}
+
+static void dce112_program_pixel_clk_resync(
+		struct dce110_clk_src *clk_src,
+		enum signal_type signal_type,
+		enum dc_color_depth colordepth,
+		bool enable_ycbcr420)
+{
+	REG_UPDATE(PIXCLK_RESYNC_CNTL,
+			PHYPLLA_DCCG_DEEP_COLOR_CNTL, 0);
+	/*
+	 * 24 bit mode: TMDS clock = 1.0 x pixel clock  (1:1)
+	 * 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+	 * 36 bit mode: TMDS clock = 1.5 x pixel clock  (3:2)
+	 * 48 bit mode: TMDS clock = 2 x pixel clock    (2:1)
+	 */
+	if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
+		return;
+
+	switch (colordepth) {
+	case COLOR_DEPTH_888:
+		REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+				PHYPLLA_DCCG_DEEP_COLOR_CNTL, 0,
+				PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
+		break;
+	case COLOR_DEPTH_101010:
+		REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+				PHYPLLA_DCCG_DEEP_COLOR_CNTL, 1,
+				PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
+		break;
+	case COLOR_DEPTH_121212:
+		REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+				PHYPLLA_DCCG_DEEP_COLOR_CNTL, 2,
+				PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
+		break;
+	case COLOR_DEPTH_161616:
+		REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+				PHYPLLA_DCCG_DEEP_COLOR_CNTL, 3,
+				PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, enable_ycbcr420);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool dce110_program_pix_clk(
+		struct clock_source *clk_src,
+		struct pixel_clk_params *pix_clk_params,
+		struct pll_settings *pll_settings)
+{
+	struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
+	struct bp_pixel_clock_parameters bp_pc_params = {0};
+
+	/* First disable SS
+	 * ATOMBIOS will enable by default SS on PLL for DP,
+	 * do not disable it here
+	 */
+	if (clk_src->id != CLOCK_SOURCE_ID_EXTERNAL &&
+			!dc_is_dp_signal(pix_clk_params->signal_type) &&
+			clk_src->ctx->dce_version <= DCE_VERSION_11_0)
+		disable_spread_spectrum(dce110_clk_src);
+
+	/* ATOMBIOS expects the pixel rate adjusted by the deep color ratio */
+	bp_pc_params.controller_id = pix_clk_params->controller_id;
+	bp_pc_params.pll_id = clk_src->id;
+	bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk;
+	bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
+	bp_pc_params.signal_type = pix_clk_params->signal_type;
+
+	switch (clk_src->ctx->dce_version) {
+	case DCE_VERSION_11_2:
+		if (clk_src->id != CLOCK_SOURCE_ID_DP_DTO) {
+			bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+							pll_settings->use_external_clk;
+			bp_pc_params.flags.SET_XTALIN_REF_SRC =
+							!pll_settings->use_external_clk;
+			if (pix_clk_params->flags.SUPPORT_YCBCR420) {
+				bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk / 2;
+				bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+			}
+		}
+		if (dce110_clk_src->bios->funcs->set_pixel_clock(
+				dce110_clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+			return false;
+		/* Resync deep color DTO */
+		if (clk_src->id != CLOCK_SOURCE_ID_DP_DTO)
+			dce112_program_pixel_clk_resync(dce110_clk_src,
+						pix_clk_params->signal_type,
+						pix_clk_params->color_depth,
+						pix_clk_params->flags.SUPPORT_YCBCR420);
+		break;
+	case DCE_VERSION_8_0:
+	case DCE_VERSION_10_0:
+	case DCE_VERSION_11_0:
+		bp_pc_params.reference_divider = pll_settings->reference_divider;
+		bp_pc_params.feedback_divider = pll_settings->feedback_divider;
+		bp_pc_params.fractional_feedback_divider =
+				pll_settings->fract_feedback_divider;
+		bp_pc_params.pixel_clock_post_divider =
+				pll_settings->pix_clk_post_divider;
+		bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+						pll_settings->use_external_clk;
+
+		if (dce110_clk_src->bios->funcs->set_pixel_clock(
+				dce110_clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+			return false;
+		/* Enable SS.
+		 * ATOMBIOS enables SS for DP on the PLL (DP ID clock) by default.
+		 * Per the HW display PLL team, SS control settings should be
+		 * programmed during PLL reset, but they do not take effect
+		 * until SS_EN is asserted. */
+		if (clk_src->id != CLOCK_SOURCE_ID_EXTERNAL
+			&& pix_clk_params->flags.ENABLE_SS && !dc_is_dp_signal(
+							pix_clk_params->signal_type)) {
+
+			if (!enable_spread_spectrum(dce110_clk_src,
+							pix_clk_params->signal_type,
+							pll_settings))
+				return false;
+			/* Resync deep color DTO */
+			dce110_program_pixel_clk_resync(dce110_clk_src,
+						pix_clk_params->signal_type,
+						pix_clk_params->color_depth);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return true;
+}
+
+static bool dce110_clock_source_power_down(
+		struct clock_source *clk_src)
+{
+	struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
+	enum bp_result bp_result;
+	struct bp_pixel_clock_parameters bp_pixel_clock_params = {0};
+
+	if (clk_src->dp_clk_src)
+		return true;
+
+	/* If Pixel Clock is 0 it means Power Down Pll*/
+	bp_pixel_clock_params.controller_id = CONTROLLER_ID_UNDEFINED;
+	bp_pixel_clock_params.pll_id = clk_src->id;
+	bp_pixel_clock_params.flags.FORCE_PROGRAMMING_OF_PLL = 1;
+
+	/*Call ASICControl to process ATOMBIOS Exec table*/
+	bp_result = dce110_clk_src->bios->funcs->set_pixel_clock(
+			dce110_clk_src->bios,
+			&bp_pixel_clock_params);
+
+	return bp_result == BP_RESULT_OK;
+}
+
+/*****************************************/
+/* Constructor                           */
+/*****************************************/
+static const struct clock_source_funcs dce110_clk_src_funcs = {
+	.cs_power_down = dce110_clock_source_power_down,
+	.program_pix_clk = dce110_program_pix_clk,
+	.get_pix_clk_dividers = dce110_get_pix_clk_dividers
+};
+
+static void get_ss_info_from_atombios(
+		struct dce110_clk_src *clk_src,
+		enum as_signal_type as_signal,
+		struct spread_spectrum_data *spread_spectrum_data[],
+		uint32_t *ss_entries_num)
+{
+	enum bp_result bp_result = BP_RESULT_FAILURE;
+	struct spread_spectrum_info *ss_info;
+	struct spread_spectrum_data *ss_data;
+	struct spread_spectrum_info *ss_info_cur;
+	struct spread_spectrum_data *ss_data_cur;
+	uint32_t i;
+
+	if (ss_entries_num == NULL) {
+		dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+			"Invalid entry !!!\n");
+		return;
+	}
+	if (spread_spectrum_data == NULL) {
+		dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+			"Invalid array pointer!!!\n");
+		return;
+	}
+
+	spread_spectrum_data[0] = NULL;
+
+	*ss_entries_num = clk_src->bios->funcs->get_ss_entry_number(
+			clk_src->bios,
+			as_signal);
+
+	if (*ss_entries_num == 0)
+		return;
+
+	ss_info = dm_alloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num));
+	ss_info_cur = ss_info;
+	if (ss_info == NULL)
+		return;
+
+	ss_data = dm_alloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num));
+	if (ss_data == NULL)
+		goto out_free_info;
+
+	for (i = 0, ss_info_cur = ss_info;
+		i < (*ss_entries_num);
+		++i, ++ss_info_cur) {
+
+		bp_result = clk_src->bios->funcs->get_spread_spectrum_info(
+				clk_src->bios,
+				as_signal,
+				i,
+				ss_info_cur);
+
+		if (bp_result != BP_RESULT_OK)
+			goto out_free_data;
+	}
+
+	for (i = 0, ss_info_cur = ss_info, ss_data_cur = ss_data;
+		i < (*ss_entries_num);
+		++i, ++ss_info_cur, ++ss_data_cur) {
+
+		if (ss_info_cur->type.STEP_AND_DELAY_INFO) {
+			dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+				"Invalid ATOMBIOS SS Table!!!\n");
+			goto out_free_data;
+		}
+
+		/* For HDMI check the SS percentage: if it is > 6 (0.06%),
+		 * the ATOMBIOS table info is invalid */
+		if (as_signal == AS_SIGNAL_TYPE_HDMI
+				&& ss_info_cur->spread_spectrum_percentage > 6) {
+			/* invalid input, do nothing */
+			dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+				"Invalid SS percentage for HDMI in ATOMBIOS info table!!!\n");
+			continue;
+		}
+		if (ss_info_cur->spread_percentage_divider == 1000) {
+			/* Convert to the previous precision in case ATOMBIOS
+			 * set a new one (otherwise all code in the DCE
+			 * specific classes for all previous ASICs would need
+			 * to be updated for SS calculations, audio SS
+			 * compensation and DP DTO SS compensation, which
+			 * assume a fixed SS percentage divider of 100). */
+			ss_info_cur->spread_spectrum_percentage /= 10;
+			ss_info_cur->spread_percentage_divider = 100;
+		}
+
+		ss_data_cur->freq_range_khz = ss_info_cur->target_clock_range;
+		ss_data_cur->percentage =
+				ss_info_cur->spread_spectrum_percentage;
+		ss_data_cur->percentage_divider =
+				ss_info_cur->spread_percentage_divider;
+		ss_data_cur->modulation_freq_hz =
+				ss_info_cur->spread_spectrum_range;
+
+		if (ss_info_cur->type.CENTER_MODE)
+			ss_data_cur->flags.CENTER_SPREAD = 1;
+
+		if (ss_info_cur->type.EXTERNAL)
+			ss_data_cur->flags.EXTERNAL_SS = 1;
+
+	}
+
+	*spread_spectrum_data = ss_data;
+	dm_free(ss_info);
+	return;
+
+out_free_data:
+	dm_free(ss_data);
+	*ss_entries_num = 0;
+out_free_info:
+	dm_free(ss_info);
+}
+
+static void ss_info_from_atombios_create(
+	struct dce110_clk_src *clk_src)
+{
+	get_ss_info_from_atombios(
+		clk_src,
+		AS_SIGNAL_TYPE_DISPLAY_PORT,
+		&clk_src->dp_ss_params,
+		&clk_src->dp_ss_params_cnt);
+	get_ss_info_from_atombios(
+		clk_src,
+		AS_SIGNAL_TYPE_HDMI,
+		&clk_src->hdmi_ss_params,
+		&clk_src->hdmi_ss_params_cnt);
+	get_ss_info_from_atombios(
+		clk_src,
+		AS_SIGNAL_TYPE_DVI,
+		&clk_src->dvi_ss_params,
+		&clk_src->dvi_ss_params_cnt);
+}
+
+static bool calc_pll_max_vco_construct(
+			struct calc_pll_clock_source *calc_pll_cs,
+			struct calc_pll_clock_source_init_data *init_data)
+{
+	uint32_t i;
+	struct firmware_info fw_info = { { 0 } };
+
+	if (calc_pll_cs == NULL ||
+			init_data == NULL ||
+			init_data->bp == NULL)
+		return false;
+
+	if (init_data->bp->funcs->get_firmware_info(
+				init_data->bp,
+				&fw_info) != BP_RESULT_OK)
+		return false;
+
+	calc_pll_cs->ctx = init_data->ctx;
+	calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+	calc_pll_cs->min_vco_khz =
+			fw_info.pll_info.min_output_pxl_clk_pll_frequency;
+	calc_pll_cs->max_vco_khz =
+			fw_info.pll_info.max_output_pxl_clk_pll_frequency;
+
+	if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
+		calc_pll_cs->max_pll_input_freq_khz =
+			init_data->max_override_input_pxl_clk_pll_freq_khz;
+	else
+		calc_pll_cs->max_pll_input_freq_khz =
+			fw_info.pll_info.max_input_pxl_clk_pll_frequency;
+
+	if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
+		calc_pll_cs->min_pll_input_freq_khz =
+			init_data->min_override_input_pxl_clk_pll_freq_khz;
+	else
+		calc_pll_cs->min_pll_input_freq_khz =
+			fw_info.pll_info.min_input_pxl_clk_pll_frequency;
+
+	calc_pll_cs->min_pix_clock_pll_post_divider =
+			init_data->min_pix_clk_pll_post_divider;
+	calc_pll_cs->max_pix_clock_pll_post_divider =
+			init_data->max_pix_clk_pll_post_divider;
+	calc_pll_cs->min_pll_ref_divider =
+			init_data->min_pll_ref_divider;
+	calc_pll_cs->max_pll_ref_divider =
+			init_data->max_pll_ref_divider;
+
+	if (init_data->num_fract_fb_divider_decimal_point == 0 ||
+		init_data->num_fract_fb_divider_decimal_point_precision >
+				init_data->num_fract_fb_divider_decimal_point) {
+		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+			"The dec point num or precision is incorrect!");
+		return false;
+	}
+	if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
+		dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+			"Incorrect fract feedback divider precision num!");
+		return false;
+	}
+
+	calc_pll_cs->fract_fb_divider_decimal_points_num =
+				init_data->num_fract_fb_divider_decimal_point;
+	calc_pll_cs->fract_fb_divider_precision =
+			init_data->num_fract_fb_divider_decimal_point_precision;
+	calc_pll_cs->fract_fb_divider_factor = 1;
+	for (i = 0; i < calc_pll_cs->fract_fb_divider_decimal_points_num; ++i)
+		calc_pll_cs->fract_fb_divider_factor *= 10;
+
+	calc_pll_cs->fract_fb_divider_precision_factor = 1;
+	for (
+		i = 0;
+		i < (calc_pll_cs->fract_fb_divider_decimal_points_num -
+				calc_pll_cs->fract_fb_divider_precision);
+		++i)
+		calc_pll_cs->fract_fb_divider_precision_factor *= 10;
+
+	return true;
+}
+
+bool dce110_clk_src_construct(
+	struct dce110_clk_src *clk_src,
+	struct dc_context *ctx,
+	struct dc_bios *bios,
+	enum clock_source_id id,
+	const struct dce110_clk_src_regs *regs,
+	const struct dce110_clk_src_shift *cs_shift,
+	const struct dce110_clk_src_mask *cs_mask)
+{
+	struct firmware_info fw_info = { { 0 } };
+	struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
+	struct calc_pll_clock_source_init_data calc_pll_cs_init_data;
+
+	clk_src->base.ctx = ctx;
+	clk_src->bios = bios;
+	clk_src->base.id = id;
+	clk_src->base.funcs = &dce110_clk_src_funcs;
+
+	clk_src->regs = regs;
+	clk_src->cs_shift = cs_shift;
+	clk_src->cs_mask = cs_mask;
+
+	if (clk_src->bios->funcs->get_firmware_info(
+			clk_src->bios, &fw_info) != BP_RESULT_OK) {
+		ASSERT_CRITICAL(false);
+		goto unexpected_failure;
+	}
+
+	clk_src->ext_clk_khz =
+			fw_info.external_clock_source_frequency_for_dp;
+
+	switch (clk_src->base.ctx->dce_version) {
+	case DCE_VERSION_8_0:
+	case DCE_VERSION_10_0:
+	case DCE_VERSION_11_0:
+
+		/* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
+		calc_pll_cs_init_data.bp = bios;
+		calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
+		calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
+				clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+		calc_pll_cs_init_data.min_pll_ref_divider = 1;
+		calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+		/* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+		calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
+		/* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+		calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
+		/*numberOfFractFBDividerDecimalPoints*/
+		calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
+				FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+		/*number of decimal point to round off for fractional feedback divider value*/
+		calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
+				FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+		calc_pll_cs_init_data.ctx = ctx;
+
+		/*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */
+		calc_pll_cs_init_data_hdmi.bp = bios;
+		calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
+		calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
+				clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+		calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
+		calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+		/* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+		calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
+		/* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+		calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
+		/*numberOfFractFBDividerDecimalPoints*/
+		calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
+				FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+		/*number of decimal point to round off for fractional feedback divider value*/
+		calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
+				FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+		calc_pll_cs_init_data_hdmi.ctx = ctx;
+
+		clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+
+		if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
+			return true;
+
+		/* PLL only from here on */
+		ss_info_from_atombios_create(clk_src);
+
+		if (!calc_pll_max_vco_construct(
+				&clk_src->calc_pll,
+				&calc_pll_cs_init_data)) {
+			ASSERT_CRITICAL(false);
+			goto unexpected_failure;
+		}
+
+		if (clk_src->ref_freq_khz == 48000) {
+			calc_pll_cs_init_data_hdmi.
+				min_override_input_pxl_clk_pll_freq_khz = 24000;
+			calc_pll_cs_init_data_hdmi.
+				max_override_input_pxl_clk_pll_freq_khz = 48000;
+		} else if (clk_src->ref_freq_khz == 100000) {
+			calc_pll_cs_init_data_hdmi.
+				min_override_input_pxl_clk_pll_freq_khz = 25000;
+			calc_pll_cs_init_data_hdmi.
+				max_override_input_pxl_clk_pll_freq_khz = 50000;
+		}
+
+		if (!calc_pll_max_vco_construct(
+				&clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
+			ASSERT_CRITICAL(false);
+			goto unexpected_failure;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return true;
+
+unexpected_failure:
+	return false;
+}
+

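The divider search above reduces to one core computation. A standalone worked example of the rounding done in calculate_fb_and_fractional_fb_divider(), using plain C99 division in place of the kernel's div_u64() and illustrative sample clocks (148,500 kHz pixel clock, 27,000 kHz crystal):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t target_khz = 148500;    /* e.g. 1080p60 HDMI */
	uint64_t ref_freq_khz = 27000;   /* crystal */
	uint64_t ref_div = 2, post_div = 3;
	uint64_t fract_factor = 1000000; /* 6 decimal points -> 10^6 */
	uint64_t precision_factor = 1;   /* 10^(6 - 6): full precision */

	/* feedback divider scaled by 10 * fract_factor */
	uint64_t fb = target_khz * ref_div * post_div * 10 * fract_factor
			/ ref_freq_khz;

	/* round to the configured precision, then drop the extra 10x */
	fb = (fb + 5 * precision_factor) / (precision_factor * 10)
			* precision_factor;

	/* integer part 33, fractional part 0:
	 * 33.000000 * 27000 / (2 * 3) = 148500 kHz exactly */
	printf("feedback = %llu, fract = %llu\n",
	       (unsigned long long)(fb / fract_factor),
	       (unsigned long long)(fb % fract_factor));
	return 0;
}
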
+ 109 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h

@@ -0,0 +1,109 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_DCE_H__
+#define __DC_CLOCK_SOURCE_DCE_H__
+
+#include "../inc/clock_source.h"
+
+#define TO_DCE110_CLK_SRC(clk_src)\
+	container_of(clk_src, struct dce110_clk_src, base)
+
+#define CS_COMMON_REG_LIST_DCE_100_110(id) \
+		SRI(RESYNC_CNTL, PIXCLK, id), \
+		SRI(PLL_CNTL, BPHYC_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_80(id) \
+		SRI(RESYNC_CNTL, PIXCLK, id), \
+		SRI(PLL_CNTL, DCCG_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_112(id) \
+		SRI(PIXCLK_RESYNC_CNTL, PHYPLL, id)
+
+#define CS_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+	CS_SF(PLL_CNTL, PLL_REF_DIV_SRC, mask_sh),\
+	CS_SF(PIXCLK1_RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, mask_sh),\
+	CS_SF(PLL_POST_DIV, PLL_POST_DIV_PIXCLK, mask_sh),\
+	CS_SF(PLL_REF_DIV, PLL_REF_DIV, mask_sh),\
+
+#define CS_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+	CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh),\
+
+#define CS_REG_FIELD_LIST(type) \
+	type PLL_REF_DIV_SRC; \
+	type DCCG_DEEP_COLOR_CNTL1; \
+	type PHYPLLA_DCCG_DEEP_COLOR_CNTL; \
+	type PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE; \
+	type PLL_POST_DIV_PIXCLK; \
+	type PLL_REF_DIV; \
+
+struct dce110_clk_src_shift {
+	CS_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce110_clk_src_mask {
+	CS_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce110_clk_src_regs {
+	uint32_t RESYNC_CNTL;
+	uint32_t PIXCLK_RESYNC_CNTL;
+	uint32_t PLL_CNTL;
+};
+
+struct dce110_clk_src {
+	struct clock_source base;
+	const struct dce110_clk_src_regs *regs;
+	const struct dce110_clk_src_mask *cs_mask;
+	const struct dce110_clk_src_shift *cs_shift;
+	struct dc_bios *bios;
+
+	struct spread_spectrum_data *dp_ss_params;
+	uint32_t dp_ss_params_cnt;
+	struct spread_spectrum_data *hdmi_ss_params;
+	uint32_t hdmi_ss_params_cnt;
+	struct spread_spectrum_data *dvi_ss_params;
+	uint32_t dvi_ss_params_cnt;
+
+	uint32_t ext_clk_khz;
+	uint32_t ref_freq_khz;
+
+	struct calc_pll_clock_source calc_pll;
+	struct calc_pll_clock_source calc_pll_hdmi;
+};
+
+bool dce110_clk_src_construct(
+	struct dce110_clk_src *clk_src,
+	struct dc_context *ctx,
+	struct dc_bios *bios,
+	enum clock_source_id id,
+	const struct dce110_clk_src_regs *regs,
+	const struct dce110_clk_src_shift *cs_shift,
+	const struct dce110_clk_src_mask *cs_mask);
+
+#endif

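TO_DCE110_CLK_SRC() is the usual container_of() downcast: every clock_source_funcs callback receives the generic struct clock_source and recovers the wrapping dce110_clk_src because base is embedded in it. A freestanding sketch of the pattern with simplified stand-in types (the kernel's container_of() adds type checking on top of this):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clock_source { int id; };

struct dce110_clk_src {
	struct clock_source base;
	unsigned int ext_clk_khz;
};

#define TO_DCE110_CLK_SRC(clk_src) \
	container_of(clk_src, struct dce110_clk_src, base)

int main(void)
{
	struct dce110_clk_src cs = {
		.base = { .id = 1 },
		.ext_clk_khz = 270000,
	};
	struct clock_source *generic = &cs.base; /* what the funcs receive */

	/* recover the wrapper from the generic pointer */
	printf("%u kHz\n", TO_DCE110_CLK_SRC(generic)->ext_clk_khz);
	return 0;
}
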
+ 195 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c

@@ -0,0 +1,195 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_hwseq.h"
+#include "reg_helper.h"
+#include "hw_sequencer.h"
+
+#define CTX \
+	hws->ctx
+#define REG(reg)\
+	hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+	hws->shifts->field_name, hws->masks->field_name
+
+void dce_enable_fe_clock(struct dce_hwseq *hws,
+		unsigned int fe_inst, bool enable)
+{
+	REG_UPDATE(DCFE_CLOCK_CONTROL[fe_inst],
+			DCFE_CLOCK_ENABLE, enable);
+}
+
+void dce_pipe_control_lock(struct dce_hwseq *hws,
+		unsigned int blnd_inst,
+		enum pipe_lock_control control_mask,
+		bool lock)
+{
+	uint32_t lock_val = lock ? 1 : 0;
+	uint32_t dcp_grph, scl, dcp_grph_surf, blnd, update_lock_mode;
+
+	uint32_t val = REG_GET_5(BLND_V_UPDATE_LOCK[blnd_inst],
+			BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
+			BLND_SCL_V_UPDATE_LOCK, &scl,
+			BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, &dcp_grph_surf,
+			BLND_BLND_V_UPDATE_LOCK, &blnd,
+			BLND_V_UPDATE_LOCK_MODE, &update_lock_mode);
+
+	if (control_mask & PIPE_LOCK_CONTROL_GRAPHICS)
+		dcp_grph = lock_val;
+
+	if (control_mask & PIPE_LOCK_CONTROL_SCL)
+		scl = lock_val;
+
+	if (control_mask & PIPE_LOCK_CONTROL_SURFACE)
+		dcp_grph_surf = lock_val;
+
+	if (control_mask & PIPE_LOCK_CONTROL_BLENDER)
+		blnd = lock_val;
+
+	if (control_mask & PIPE_LOCK_CONTROL_MODE)
+		update_lock_mode = lock_val;
+
+	REG_SET_5(BLND_V_UPDATE_LOCK[blnd_inst], val,
+			BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
+			BLND_SCL_V_UPDATE_LOCK, scl,
+			BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, dcp_grph_surf,
+			BLND_BLND_V_UPDATE_LOCK, blnd,
+			BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
+
+	if (hws->wa.blnd_crtc_trigger &&
+			!lock && (control_mask & PIPE_LOCK_CONTROL_BLENDER)) {
+		uint32_t value = REG_READ(CRTC_H_BLANK_START_END[blnd_inst]);
+
+		REG_WRITE(CRTC_H_BLANK_START_END[blnd_inst], value);
+	}
+}
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+	unsigned int blnd_inst,
+	enum blnd_mode mode)
+{
+	uint32_t feedthrough = 1;
+	uint32_t blnd_mode = 0;
+	uint32_t multiplied_mode = 0;
+	uint32_t alpha_mode = 2;
+
+	switch (mode) {
+	case BLND_MODE_OTHER_PIPE:
+		feedthrough = 0;
+		blnd_mode = 1;
+		alpha_mode = 0;
+		break;
+	case BLND_MODE_BLENDING:
+		feedthrough = 0;
+		blnd_mode = 2;
+		alpha_mode = 0;
+		multiplied_mode = 1;
+		break;
+	case BLND_MODE_CURRENT_PIPE:
+	default:
+		if (REG(BLND_CONTROL[blnd_inst]) == REG(BLNDV_CONTROL) ||
+				blnd_inst == 0)
+			feedthrough = 0;
+		break;
+	}
+
+	REG_UPDATE_4(BLND_CONTROL[blnd_inst],
+		BLND_FEEDTHROUGH_EN, feedthrough,
+		BLND_ALPHA_MODE, alpha_mode,
+		BLND_MODE, blnd_mode,
+		BLND_MULTIPLIED_MODE, multiplied_mode);
+}
+
+
+static void dce_disable_sram_shut_down(struct dce_hwseq *hws)
+{
+	if (REG(DC_MEM_GLOBAL_PWR_REQ_CNTL))
+		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL,
+				DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
+}
+
+static void dce_underlay_clock_enable(struct dce_hwseq *hws)
+{
+	/* todo: why do we need this at boot? is dce_enable_fe_clock enough? */
+	if (REG(DCFEV_CLOCK_CONTROL))
+		REG_UPDATE(DCFEV_CLOCK_CONTROL,
+				DCFEV_CLOCK_ENABLE, 1);
+}
+
+static void enable_hw_base_light_sleep(void)
+{
+	/* TODO: implement */
+}
+
+static void disable_sw_manual_control_light_sleep(void)
+{
+	/* TODO: implement */
+}
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+		bool enable)
+{
+	if (enable) {
+		enable_hw_base_light_sleep();
+		disable_sw_manual_control_light_sleep();
+	} else {
+		dce_disable_sram_shut_down(hws);
+		dce_underlay_clock_enable(hws);
+	}
+}
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+		struct clock_source *clk_src,
+		unsigned int tg_inst)
+{
+	if (clk_src->id == CLOCK_SOURCE_ID_DP_DTO) {
+		REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+				DP_DTO0_ENABLE, 1);
+
+	} else if (clk_src->id >= CLOCK_SOURCE_COMBO_PHY_PLL0) {
+		uint32_t rate_source = clk_src->id - CLOCK_SOURCE_COMBO_PHY_PLL0;
+
+		REG_UPDATE_2(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+				PHYPLL_PIXEL_RATE_SOURCE, rate_source,
+				PIXEL_RATE_PLL_SOURCE, 0);
+
+		REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+				DP_DTO0_ENABLE, 0);
+
+	} else if (clk_src->id <= CLOCK_SOURCE_ID_PLL2) {
+		uint32_t rate_source = clk_src->id - CLOCK_SOURCE_ID_PLL0;
+
+		REG_UPDATE_2(PIXEL_RATE_CNTL[tg_inst],
+				PIXEL_RATE_SOURCE, rate_source,
+				DP_DTO0_ENABLE, 0);
+
+		if (REG(PHYPLL_PIXEL_RATE_CNTL[tg_inst]))
+			REG_UPDATE(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+					PIXEL_RATE_PLL_SOURCE, 1);
+	} else {
+		DC_ERR("unknown clock source");
+	}
+}

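All of the REG_UPDATE()/REG_GET() calls in dce_hwseq.c resolve, via the FN() macro at the top of the file, to read-modify-write operations driven by the shift/mask tables declared below. A minimal sketch of that underlying operation, with a plain variable standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t set_field(uint32_t reg_val, uint8_t shift, uint32_t mask,
			  uint32_t field_val)
{
	/* clear the field, then OR in the new value */
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

static uint32_t get_field(uint32_t reg_val, uint8_t shift, uint32_t mask)
{
	return (reg_val & mask) >> shift;
}

int main(void)
{
	uint32_t fake_mmio = 0;  /* stands in for a real register */
	const uint8_t shift = 0; /* e.g. a DCFE_CLOCK_ENABLE-style bit */
	const uint32_t mask = 0x1;

	fake_mmio = set_field(fake_mmio, shift, mask, 1);  /* ~ REG_UPDATE(..., 1) */
	printf("%u\n", get_field(fake_mmio, shift, mask)); /* prints 1 */
	return 0;
}
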
+ 250 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h

@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_HWSEQ_H__
+#define __DCE_HWSEQ_H__
+
+#include "hw_sequencer.h"
+
+#define HWSEQ_DCEF_REG_LIST_DCE8() \
+	.DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
+	.DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
+	.DCFE_CLOCK_CONTROL[2] = mmCRTC2_CRTC_DCFE_CLOCK_CONTROL, \
+	.DCFE_CLOCK_CONTROL[3] = mmCRTC3_CRTC_DCFE_CLOCK_CONTROL, \
+	.DCFE_CLOCK_CONTROL[4] = mmCRTC4_CRTC_DCFE_CLOCK_CONTROL, \
+	.DCFE_CLOCK_CONTROL[5] = mmCRTC5_CRTC_DCFE_CLOCK_CONTROL
+
+#define HWSEQ_DCEF_REG_LIST() \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 3), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 4), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 5), \
+	SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+
+#define HWSEQ_BLND_REG_LIST() \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 0), \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 1), \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 3), \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 4), \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 5), \
+	SRII(BLND_CONTROL, BLND, 0), \
+	SRII(BLND_CONTROL, BLND, 1), \
+	SRII(BLND_CONTROL, BLND, 2), \
+	SRII(BLND_CONTROL, BLND, 3), \
+	SRII(BLND_CONTROL, BLND, 4), \
+	SRII(BLND_CONTROL, BLND, 5)
+
+#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
+	SRII(PIXEL_RATE_CNTL, blk, 0), \
+	SRII(PIXEL_RATE_CNTL, blk, 1), \
+	SRII(PIXEL_RATE_CNTL, blk, 2), \
+	SRII(PIXEL_RATE_CNTL, blk, 3), \
+	SRII(PIXEL_RATE_CNTL, blk, 4), \
+	SRII(PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_PHYPLL_REG_LIST(blk) \
+	SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+	SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
+	SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2), \
+	SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+	SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
+	SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_DCE11_REG_LIST_BASE() \
+	SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+	SR(DCFEV_CLOCK_CONTROL), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+	SRII(CRTC_H_BLANK_START_END, CRTC, 0),\
+	SRII(CRTC_H_BLANK_START_END, CRTC, 1),\
+	SRII(BLND_V_UPDATE_LOCK, BLND, 0),\
+	SRII(BLND_V_UPDATE_LOCK, BLND, 1),\
+	SRII(BLND_CONTROL, BLND, 0),\
+	SRII(BLND_CONTROL, BLND, 1),\
+	SR(BLNDV_CONTROL),\
+	HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
+
+#define HWSEQ_DCE8_REG_LIST() \
+	HWSEQ_DCEF_REG_LIST_DCE8(), \
+	HWSEQ_BLND_REG_LIST(), \
+	HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
+
+#define HWSEQ_DCE10_REG_LIST() \
+	HWSEQ_DCEF_REG_LIST(), \
+	HWSEQ_BLND_REG_LIST(), \
+	HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
+
+#define HWSEQ_ST_REG_LIST() \
+	HWSEQ_DCE11_REG_LIST_BASE(), \
+	.DCFE_CLOCK_CONTROL[2] = mmDCFEV_CLOCK_CONTROL, \
+	.CRTC_H_BLANK_START_END[2] = mmCRTCV_H_BLANK_START_END, \
+	.BLND_V_UPDATE_LOCK[2] = mmBLNDV_V_UPDATE_LOCK, \
+	.BLND_CONTROL[2] = mmBLNDV_CONTROL,
+
+#define HWSEQ_CZ_REG_LIST() \
+	HWSEQ_DCE11_REG_LIST_BASE(), \
+	SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+	SRII(CRTC_H_BLANK_START_END, CRTC, 2), \
+	SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+	SRII(BLND_CONTROL, BLND, 2), \
+	.DCFE_CLOCK_CONTROL[3] = mmDCFEV_CLOCK_CONTROL, \
+	.CRTC_H_BLANK_START_END[3] = mmCRTCV_H_BLANK_START_END, \
+	.BLND_V_UPDATE_LOCK[3] = mmBLNDV_V_UPDATE_LOCK, \
+	.BLND_CONTROL[3] = mmBLNDV_CONTROL
+
+#define HWSEQ_DCE112_REG_LIST() \
+	HWSEQ_DCE10_REG_LIST(), \
+	HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+	HWSEQ_PHYPLL_REG_LIST(CRTC)
+
+struct dce_hwseq_registers {
+	uint32_t DCFE_CLOCK_CONTROL[6];
+	uint32_t DCFEV_CLOCK_CONTROL;
+	uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
+	uint32_t BLND_V_UPDATE_LOCK[6];
+	uint32_t BLND_CONTROL[6];
+	uint32_t BLNDV_CONTROL;
+
+	uint32_t CRTC_H_BLANK_START_END[6];
+	uint32_t PIXEL_RATE_CNTL[6];
+	uint32_t PHYPLL_PIXEL_RATE_CNTL[6];
+};
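
The SR/SRII helpers used above are defined elsewhere in dc's register helpers; the lists are meant to be dropped into designated initializers. As a hedged sketch (the SR/SRII expansions and mm* register names below are assumptions for illustration, not part of this patch), a DCE10 resource file would presumably populate the struct roughly like this:

    /* Illustrative expansions only; the real SR/SRII definitions live in
     * the dc register helpers and may differ. */
    #define SR(reg_name) \
            .reg_name = mm ## reg_name
    #define SRII(reg_name, block, id) \
            .reg_name[id] = mm ## block ## id ## _ ## reg_name

    static const struct dce_hwseq_registers hwseq_regs = {
            HWSEQ_DCE10_REG_LIST()
    };

Under that assumption, SRII(DCFE_CLOCK_CONTROL, DCFE, 0) becomes .DCFE_CLOCK_CONTROL[0] = mmDCFE0_DCFE_CLOCK_CONTROL, while the DCE8 variant above hardcodes the CRTC-prefixed names directly because DCE8 exposes the register under the CRTC block.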
+ /* set field name */
+#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
+	.field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define HWS_SF1(blk_name, reg_name, field_name, post_fix)\
+	.field_name = blk_name ## reg_name ## __ ## blk_name ## field_name ## post_fix
+
+
+#define HWSEQ_DCEF_MASK_SH_LIST(mask_sh, blk)\
+	HWS_SF(blk, CLOCK_CONTROL, DCFE_CLOCK_ENABLE, mask_sh),\
+	SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
+
+#define HWSEQ_BLND_MASK_SH_LIST(mask_sh, blk)\
+	HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(blk, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(blk, V_UPDATE_LOCK, BLND_BLND_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(blk, V_UPDATE_LOCK, BLND_V_UPDATE_LOCK_MODE, mask_sh),\
+	HWS_SF(blk, CONTROL, BLND_FEEDTHROUGH_EN, mask_sh),\
+	HWS_SF(blk, CONTROL, BLND_ALPHA_MODE, mask_sh),\
+	HWS_SF(blk, CONTROL, BLND_MODE, mask_sh),\
+	HWS_SF(blk, CONTROL, BLND_MULTIPLIED_MODE, mask_sh)
+
+#define HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, blk)\
+	HWS_SF1(blk, PIXEL_RATE_CNTL, PIXEL_RATE_SOURCE, mask_sh),\
+	HWS_SF(blk, PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#define HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, blk)\
+	HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
+	HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
+
+#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
+	.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
+	HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+	HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
+	HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
+	HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
+	HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
+	HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
+	HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+	SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
+	HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
+	HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+	HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_REG_FIELD_LIST(type) \
+	type DCFE_CLOCK_ENABLE; \
+	type DCFEV_CLOCK_ENABLE; \
+	type DC_MEM_GLOBAL_PWR_REQ_DIS; \
+	type BLND_DCP_GRPH_V_UPDATE_LOCK; \
+	type BLND_SCL_V_UPDATE_LOCK; \
+	type BLND_DCP_GRPH_SURF_V_UPDATE_LOCK; \
+	type BLND_BLND_V_UPDATE_LOCK; \
+	type BLND_V_UPDATE_LOCK_MODE; \
+	type BLND_FEEDTHROUGH_EN; \
+	type BLND_ALPHA_MODE; \
+	type BLND_MODE; \
+	type BLND_MULTIPLIED_MODE; \
+	type DP_DTO0_ENABLE; \
+	type PIXEL_RATE_SOURCE; \
+	type PHYPLL_PIXEL_RATE_SOURCE; \
+	type PIXEL_RATE_PLL_SOURCE; \
+
+struct dce_hwseq_shift {
+	HWSEQ_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_hwseq_mask {
+	HWSEQ_REG_FIELD_LIST(uint32_t)
+};
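
HWS_SF pastes the block prefix onto the register name and appends the given postfix, so one field list can fill both structs. A sketch, assuming the usual __SHIFT/_MASK spellings from the generated dce_*_sh_mask.h headers:

    /* HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, __SHIFT)
     * expands to:
     *   .BLND_SCL_V_UPDATE_LOCK =
     *           BLND_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK__SHIFT
     * so the same list instantiates the shift and mask tables:
     */
    static const struct dce_hwseq_shift hwseq_shift = {
            HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
    };
    static const struct dce_hwseq_mask hwseq_mask = {
            HWSEQ_DCE10_MASK_SH_LIST(_MASK)
    };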
+
+struct dce_hwseq_wa {
+	bool blnd_crtc_trigger;
+};
+
+struct dce_hwseq {
+	struct dc_context *ctx;
+	const struct dce_hwseq_registers *regs;
+	const struct dce_hwseq_shift *shifts;
+	const struct dce_hwseq_mask *masks;
+	struct dce_hwseq_wa wa;
+};
+
+enum blnd_mode {
+	BLND_MODE_CURRENT_PIPE = 0,/* Data from current pipe only */
+	BLND_MODE_OTHER_PIPE, /* Data from other pipe only */
+	BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
+};
+
+void dce_enable_fe_clock(struct dce_hwseq *hwss,
+		unsigned int inst, bool enable);
+
+void dce_pipe_control_lock(struct dce_hwseq *hws,
+		unsigned int blnd_inst,
+		enum pipe_lock_control control_mask,
+		bool lock);
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+	unsigned int blnd_inst, enum blnd_mode mode);
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+		bool enable);
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+		struct clock_source *clk_src,
+		unsigned int tg_inst);
+#endif   /*__DCE_HWSEQ_H__*/

+ 2176 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c

@@ -0,0 +1,2176 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+#ifndef ATOM_S2_CURRENT_BL_LEVEL_MASK
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
+#define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
+#endif
+
+#ifndef ATOM_S2_CURRENT_BL_LEVEL_SHIFT
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK  0x10000000L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT  0x1c
+#endif
+
+#define CTX \
+	enc110->base.ctx
+
+#define REG(reg)\
+	(enc110->link_regs->reg)
+
+#define AUX_REG(reg)\
+	(enc110->aux_regs->reg)
+
+#define HPD_REG(reg)\
+	(enc110->hpd_regs->reg)
+
+/* For current ASICs pixel clock - 600MHz */
+#define MAX_ENCODER_CLK 600000
+
+#define DCE11_UNIPHY_MAX_PIXEL_CLK_IN_KHZ 594000
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+/*
+ * @brief
+ * Trigger Source Select
+ * ASIC-dependent, actual values for register programming
+ */
+#define DCE110_DIG_FE_SOURCE_SELECT_INVALID 0x0
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGA 0x1
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGB 0x2
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGC 0x4
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGD 0x08
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGE 0x10
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
+
+/* all values are in milliseconds */
+/* For eDP, after power-up/power-down,
+ * 300/500 msec max. delay from LCDVCC to black video generation */
+#define PANEL_POWER_UP_TIMEOUT 300
+#define PANEL_POWER_DOWN_TIMEOUT 500
+#define HPD_CHECK_INTERVAL 10
+
+/* Minimum pixel clock, in kHz. For a TMDS signal this is 25.00 MHz */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in kHz. For a TMDS signal this is 165.00 MHz */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+/* For current ASICs pixel clock - 600MHz */
+#define MAX_ENCODER_CLOCK 600000
+
+/* Set the ABM Pipe */
+#define MCP_ABM_PIPE_SET 0x66
+/* Set the ABM level */
+#define MCP_ABM_LEVEL_SET 0x65
+/* Set backlight level */
+#define MCP_BL_SET 0x67
+
+/* PSR related commands */
+#define PSR_ENABLE 0x20
+#define PSR_EXIT 0x21
+#define PSR_SET 0x23
+
+/* TODO: used for PSR wakeup when setting the backlight level */
+static unsigned int psr_crtc_offset;
+
+/* register settings that need to be saved and restored; used at InitBacklight */
+static struct dce110_abm_backlight_registers stored_backlight_registers;
+
+enum {
+	DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DIG_REG(reg)\
+	(reg + enc110->offsets.dig)
+
+#define DP_REG(reg)\
+	(reg + enc110->offsets.dp)
+
+static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
+	.validate_output_with_stream =
+		dce110_link_encoder_validate_output_with_stream,
+	.hw_init = dce110_link_encoder_hw_init,
+	.setup = dce110_link_encoder_setup,
+	.enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+	.enable_dp_output = dce110_link_encoder_enable_dp_output,
+	.enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
+	.disable_output = dce110_link_encoder_disable_output,
+	.dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+	.dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
+	.update_mst_stream_allocation_table =
+		dce110_link_encoder_update_mst_stream_allocation_table,
+	.set_lcd_backlight_level = dce110_link_encoder_set_lcd_backlight_level,
+	.set_dmcu_backlight_level =
+			dce110_link_encoder_set_dmcu_backlight_level,
+	.init_dmcu_backlight_settings =
+			dce110_link_encoder_init_dmcu_backlight_settings,
+	.set_dmcu_abm_level = dce110_link_encoder_set_dmcu_abm_level,
+	.set_dmcu_psr_enable = dce110_link_encoder_set_dmcu_psr_enable,
+	.setup_dmcu_psr = dce110_link_encoder_setup_dmcu_psr,
+	.backlight_control = dce110_link_encoder_edp_backlight_control,
+	.power_control = dce110_link_encoder_edp_power_control,
+	.connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+	.enable_hpd = dce110_link_encoder_enable_hpd,
+	.disable_hpd = dce110_link_encoder_disable_hpd,
+	.destroy = dce110_link_encoder_destroy
+};
+
+
+static enum bp_result link_transmitter_control(
+	struct dce110_link_encoder *enc110,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result;
+	struct dc_bios *bp = enc110->base.ctx->dc_bios;
+
+	result = bp->funcs->transmitter_control(bp, cntl);
+
+	return result;
+}
+
+static void enable_phy_bypass_mode(
+	struct dce110_link_encoder *enc110,
+	bool enable)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
+
+}
+
+static void disable_prbs_symbols(
+	struct dce110_link_encoder *enc110,
+	bool disable)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_UPDATE_4(DP_DPHY_CNTL,
+			DPHY_ATEST_SEL_LANE0, disable,
+			DPHY_ATEST_SEL_LANE1, disable,
+			DPHY_ATEST_SEL_LANE2, disable,
+			DPHY_ATEST_SEL_LANE3, disable);
+}
+
+static void disable_prbs_mode(
+	struct dce110_link_encoder *enc110)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
+}
+
+static void program_pattern_symbols(
+	struct dce110_link_encoder *enc110,
+	uint16_t pattern_symbols[8])
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_SET_3(DP_DPHY_SYM0, 0,
+			DPHY_SYM1, pattern_symbols[0],
+			DPHY_SYM2, pattern_symbols[1],
+			DPHY_SYM3, pattern_symbols[2]);
+
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_SET_3(DP_DPHY_SYM1, 0,
+			DPHY_SYM4, pattern_symbols[3],
+			DPHY_SYM5, pattern_symbols[4],
+			DPHY_SYM6, pattern_symbols[5]);
+
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_SET_2(DP_DPHY_SYM2, 0,
+			DPHY_SYM7, pattern_symbols[6],
+			DPHY_SYM8, pattern_symbols[7]);
+}
+
+static void set_dp_phy_pattern_d102(
+	struct dce110_link_encoder *enc110)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc110, false);
+
+	/* For 10-bit PRBS or debug symbols
+	 * please use the following sequence: */
+
+	/* Enable debug symbols on the lanes */
+
+	disable_prbs_symbols(enc110, true);
+
+	/* Disable PRBS mode,
+	 * make sure DPHY_PRBS_CNTL.DPHY_PRBS_EN=0 */
+
+	disable_prbs_mode(enc110);
+
+	/* Program debug symbols to be output */
+	{
+		uint16_t pattern_symbols[8] = {
+			0x2AA, 0x2AA, 0x2AA, 0x2AA,
+			0x2AA, 0x2AA, 0x2AA, 0x2AA
+		};
+
+		program_pattern_symbols(enc110, pattern_symbols);
+	}
+
+	/* Enable phy bypass mode to enable the test pattern */
+
+	enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_link_training_complete(
+	struct dce110_link_encoder *enc110,
+	bool complete)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset */
+
+	REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
+
+}
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+	struct link_encoder *enc,
+	uint32_t index)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	/* Write Training Pattern */
+
+	REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
+
+	/* Set HW Register Training Complete to false */
+
+	set_link_training_complete(enc110, false);
+
+	/* Disable PHY Bypass mode to output Training Pattern */
+
+	enable_phy_bypass_mode(enc110, false);
+
+	/* Disable PRBS mode,
+	 * make sure DPHY_PRBS_CNTL.DPHY_PRBS_EN=0 */
+
+	disable_prbs_mode(enc110);
+}
+
+static void set_dp_phy_pattern_symbol_error(
+	struct dce110_link_encoder *enc110)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	uint32_t value = 0x0;
+
+	enable_phy_bypass_mode(enc110, false);
+
+	/* program correct panel mode */
+	{
+		ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+		/* DCE 120 does not have this reg */
+
+		REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
+	}
+
+	/* A PRBS23 pattern is used for most DP electrical measurements. */
+
+	/* Enable PRBS symbols on the lanes */
+
+	disable_prbs_symbols(enc110, false);
+
+	/* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
+	{
+		REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+					DPHY_PRBS_SEL, 1,
+					DPHY_PRBS_EN, 1);
+	}
+
+	/* Enable phy bypass mode to enable the test pattern */
+
+	enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_prbs7(
+	struct dce110_link_encoder *enc110)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+
+	enable_phy_bypass_mode(enc110, false);
+
+	/* A PRBS7 pattern is used for most DP electrical measurements. */
+
+	/* Enable PRBS symbols on the lanes */
+
+	disable_prbs_symbols(enc110, false);
+
+	/* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
+	{
+		REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+					DPHY_PRBS_SEL, 0,
+					DPHY_PRBS_EN, 1);
+	}
+
+	/* Enable phy bypass mode to enable the test pattern */
+
+	enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_80bit_custom(
+	struct dce110_link_encoder *enc110,
+	const uint8_t *pattern)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc110, false);
+
+	/* Enable debug symbols on the lanes */
+
+	disable_prbs_symbols(enc110, true);
+
+	/* Enable PHY bypass mode to enable the test pattern */
+	/* TODO is it really needed ? */
+
+	enable_phy_bypass_mode(enc110, true);
+
+	/* Program 80 bit custom pattern */
+	{
+		uint16_t pattern_symbols[8];
+
+		pattern_symbols[0] =
+			((pattern[1] & 0x03) << 8) | pattern[0];
+		pattern_symbols[1] =
+			((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
+		pattern_symbols[2] =
+			((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
+		pattern_symbols[3] =
+			(pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
+		pattern_symbols[4] =
+			((pattern[6] & 0x03) << 8) | pattern[5];
+		pattern_symbols[5] =
+			((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
+		pattern_symbols[6] =
+			((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
+		pattern_symbols[7] =
+			(pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
+
+		program_pattern_symbols(enc110, pattern_symbols);
+	}
+
+	/* Enable phy bypass mode to enable the test pattern */
+
+	enable_phy_bypass_mode(enc110, true);
+}
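
The unrolled shifts above pack ten pattern bytes into eight 10-bit symbols, least-significant bit first. A standalone sketch of the same repacking with a generic bit cursor (illustrative only, not part of the patch):

    #include <stdint.h>

    static void repack_80bit_pattern(const uint8_t pattern[10],
                                     uint16_t symbols[8])
    {
            unsigned int i;

            for (i = 0; i < 8; i++) {
                    unsigned int bit = i * 10;      /* symbol's first bit */
                    unsigned int byte = bit / 8;
                    unsigned int shift = bit % 8;   /* always 0, 2, 4 or 6 */

                    /* 10 consecutive little-endian bits from two bytes */
                    symbols[i] = ((pattern[byte] >> shift) |
                                  (pattern[byte + 1] << (8 - shift))) & 0x3FF;
            }
    }

For example, symbols[0] takes bits 7:0 from pattern[0] and bits 9:8 from the two low bits of pattern[1], matching the first unrolled expression above.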
+
+static void set_dp_phy_pattern_hbr2_compliance(
+	struct dce110_link_encoder *enc110)
+{
+
+	/* Previously there was a register DP_HBR2_EYE_PATTERN
+	 * that was enabled to produce the pattern.
+	 * It does not work with the latest spec change,
+	 * so we are programming the following registers manually.
+	 *
+	 * The following settings have been confirmed
+	 * by Nick Chorney and Sandra Liu */
+
+	/* Disable PHY Bypass mode to setup the test pattern */
+
+	enable_phy_bypass_mode(enc110, false);
+
+	/* Setup DIG encoder in DP SST mode */
+
+	enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT);
+
+	/* program correct panel mode */
+	{
+		ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+
+		REG_WRITE(DP_DPHY_INTERNAL_CTRL, 0x0);
+	}
+
+	/* no vbid after BS (SR)
+	 * DP_LINK_FRAMING_CNTL changed history Sandra Liu
+	 * 11000260 / 11000104 / 110000FC */
+
+	/* TODO: DP_LINK_FRAMING_CNTL should always use the hardware default
+	 * value, except when outputting the hbr2_compliance pattern for
+	 * physical PHY measurement. This is not a normal usage case. SW
+	 * should reset this register to the hardware default value after
+	 * the HBR2 eye measurement is done.
+	 */
+	BREAK_TO_DEBUGGER();
+	/* TODO: do we still need this, find out at compliance test
+	addr = mmDP_LINK_FRAMING_CNTL + fe_addr_offset;
+
+	value = dal_read_reg(ctx, addr);
+
+	set_reg_field_value(value, 0xFC,
+			DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL);
+	set_reg_field_value(value, 1,
+			DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE);
+	set_reg_field_value(value, 1,
+			DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE);
+
+	dal_write_reg(ctx, addr, value);
+	 */
+	/* swap every BS with SR */
+
+	REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
+
+	/*TODO add support for this test pattern
+	 * support_dp_hbr2_eye_pattern
+	 */
+
+	/* set link training complete */
+	set_link_training_complete(enc110, true);
+	/* do not enable video stream */
+
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+	/* Disable PHY Bypass mode to setup the test pattern */
+
+	enable_phy_bypass_mode(enc110, false);
+}
+
+static void set_dp_phy_pattern_passthrough_mode(
+	struct dce110_link_encoder *enc110,
+	enum dp_panel_mode panel_mode)
+{
+	uint32_t value;
+
+	/* program correct panel mode */
+	{
+		ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+		value = REG_READ(DP_DPHY_INTERNAL_CTRL);
+
+		switch (panel_mode) {
+		case DP_PANEL_MODE_EDP:
+			value = 0x1;
+		break;
+		case DP_PANEL_MODE_SPECIAL:
+			value = 0x11;
+		break;
+		default:
+			value = 0x0;
+			break;
+		}
+
+		REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
+	}
+
+	REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
+
+	/* set link training complete */
+
+	set_link_training_complete(enc110, true);
+
+	/* Disable PHY Bypass mode to setup the test pattern */
+
+	enable_phy_bypass_mode(enc110, false);
+
+	/* Disable PRBS mode,
+	 * make sure DPHY_PRBS_CNTL.DPHY_PRBS_EN=0 */
+
+	disable_prbs_mode(enc110);
+}
+
+/* return value is bit-vector */
+static uint8_t get_frontend_source(
+	enum engine_id engine)
+{
+	switch (engine) {
+	case ENGINE_ID_DIGA:
+		return DCE110_DIG_FE_SOURCE_SELECT_DIGA;
+	case ENGINE_ID_DIGB:
+		return DCE110_DIG_FE_SOURCE_SELECT_DIGB;
+	case ENGINE_ID_DIGC:
+		return DCE110_DIG_FE_SOURCE_SELECT_DIGC;
+	case ENGINE_ID_DIGD:
+		return DCE110_DIG_FE_SOURCE_SELECT_DIGD;
+	case ENGINE_ID_DIGE:
+		return DCE110_DIG_FE_SOURCE_SELECT_DIGE;
+	case ENGINE_ID_DIGF:
+		return DCE110_DIG_FE_SOURCE_SELECT_DIGF;
+	default:
+		ASSERT_CRITICAL(false);
+		return DCE110_DIG_FE_SOURCE_SELECT_INVALID;
+	}
+}
+
+static void configure_encoder(
+	struct dce110_link_encoder *enc110,
+	const struct dc_link_settings *link_settings)
+{
+	/* set number of lanes */
+
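+	/* DP_UDI_LANES is programmed as lane_count - LANE_COUNT_ONE,
+	 * presumably 3 for a four-lane link */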
+	REG_SET(DP_CONFIG, 0,
+			DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
+
+	/* setup scrambler */
+	REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
+}
+
+static bool is_panel_powered_on(struct dce110_link_encoder *enc110)
+{
+	bool ret;
+	uint32_t value;
+
+	REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
+	ret = value;
+
+	return ret == 1;
+}
+
+
+/* TODO duplicate of dc_link.c version */
+static struct gpio *get_hpd_gpio(const struct link_encoder *enc)
+{
+	enum bp_result bp_result;
+	struct dc_bios *dcb = enc->ctx->dc_bios;
+	struct graphics_object_hpd_info hpd_info;
+	struct gpio_pin_info pin_info;
+
+	if (dcb->funcs->get_hpd_info(dcb, enc->connector, &hpd_info) != BP_RESULT_OK)
+		return NULL;
+
+	bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+		hpd_info.hpd_int_gpio_uid, &pin_info);
+
+	if (bp_result != BP_RESULT_OK) {
+		ASSERT(bp_result == BP_RESULT_NORECORD);
+		return NULL;
+	}
+
+	return dal_gpio_service_create_irq(
+		enc->ctx->gpio_service,
+		pin_info.offset,
+		pin_info.mask);
+}
+
+/*
+ * @brief
+ * eDP only.
+ */
+static void link_encoder_edp_wait_for_hpd_ready(
+	struct dce110_link_encoder *enc110,
+	bool power_up)
+{
+	struct dc_context *ctx = enc110->base.ctx;
+	struct graphics_object_id connector = enc110->base.connector;
+	struct gpio *hpd;
+	bool edp_hpd_high = false;
+	uint32_t time_elapsed = 0;
+	uint32_t timeout = power_up ?
+		PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
+
+	if (dal_graphics_object_id_get_connector_id(connector) !=
+		CONNECTOR_ID_EDP) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	if (!power_up)
+		/* from KV, we will not HPD low after turning off VCC -
+		 * instead, we will check the SW timer in power_up(). */
+		return;
+
+	/* when we power on/off the eDP panel,
+	 * we need to wait until SENSE bit is high/low */
+
+	/* obtain HPD */
+	/* TODO what to do with this? */
+	hpd = get_hpd_gpio(&enc110->base);
+
+	if (!hpd) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
+
+	/* wait until timeout or panel detected */
+
+	do {
+		uint32_t detected = 0;
+
+		dal_gpio_get_value(hpd, &detected);
+
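+		/* detected ^ power_up == 0: the pin already matches the
+		 * expected level (high for power-up, low for power-down) */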
+		if (!(detected ^ power_up)) {
+			edp_hpd_high = true;
+			break;
+		}
+
+		msleep(HPD_CHECK_INTERVAL);
+
+		time_elapsed += HPD_CHECK_INTERVAL;
+	} while (time_elapsed < timeout);
+
+	dal_gpio_close(hpd);
+
+	dal_gpio_destroy_irq(&hpd);
+
+	if (!edp_hpd_high) {
+		dm_logger_write(ctx->logger, LOG_ERROR,
+				"%s: wait timed out!\n", __func__);
+	}
+}
+
+/*
+ * @brief
+ * eDP only. Control the power of the eDP panel.
+ */
+void dce110_link_encoder_edp_power_control(
+	struct link_encoder *enc,
+	bool power_up)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result bp_result;
+
+	if (dal_graphics_object_id_get_connector_id(enc110->base.connector) !=
+		CONNECTOR_ID_EDP) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	if ((power_up && !is_panel_powered_on(enc110)) ||
+		(!power_up && is_panel_powered_on(enc110))) {
+
+		/* Send VBIOS command to prompt eDP panel power */
+
+		dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+				"%s: Panel Power action: %s\n",
+				__func__, (power_up ? "On":"Off"));
+
+		cntl.action = power_up ?
+			TRANSMITTER_CONTROL_POWER_ON :
+			TRANSMITTER_CONTROL_POWER_OFF;
+		cntl.transmitter = enc110->base.transmitter;
+		cntl.connector_obj_id = enc110->base.connector;
+		cntl.coherent = false;
+		cntl.lanes_number = LANE_COUNT_FOUR;
+		cntl.hpd_sel = enc110->base.hpd_source;
+
+		bp_result = link_transmitter_control(enc110, &cntl);
+
+		if (BP_RESULT_OK != bp_result) {
+
+			dm_logger_write(ctx->logger, LOG_ERROR,
+					"%s: Panel Power bp_result: %d\n",
+					__func__, bp_result);
+		}
+	} else {
+		dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+				"%s: Skipping Panel Power action: %s\n",
+				__func__, (power_up ? "On":"Off"));
+	}
+
+	link_encoder_edp_wait_for_hpd_ready(enc110, true);
+}
+
+static void aux_initialize(
+	struct dce110_link_encoder *enc110)
+{
+	struct dc_context *ctx = enc110->base.ctx;
+	enum hpd_source_id hpd_source = enc110->base.hpd_source;
+	uint32_t addr = AUX_REG(AUX_CONTROL);
+	uint32_t value = dm_read_reg(ctx, addr);
+
+	set_reg_field_value(value, hpd_source, AUX_CONTROL, AUX_HPD_SEL);
+	set_reg_field_value(value, 0, AUX_CONTROL, AUX_LS_READ_EN);
+	dm_write_reg(ctx, addr, value);
+
+	addr = AUX_REG(AUX_DPHY_RX_CONTROL0);
+	value = dm_read_reg(ctx, addr);
+
+	/* 1/4 window (the maximum allowed) */
+	set_reg_field_value(value, 1,
+			AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW);
+	dm_write_reg(ctx, addr, value);
+
+}
+
+/*todo: cloned in stream enc, fix*/
+static bool is_panel_backlight_on(struct dce110_link_encoder *enc110)
+{
+	uint32_t value;
+
+	REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+	return value;
+}
+
+/*todo: cloned in stream enc, fix*/
+/*
+ * @brief
+ * eDP only. Control the backlight of the eDP panel
+ */
+void dce110_link_encoder_edp_backlight_control(
+	struct link_encoder *enc,
+	bool enable)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+
+	if (dal_graphics_object_id_get_connector_id(enc110->base.connector)
+		!= CONNECTOR_ID_EDP) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	if (enable && is_panel_backlight_on(enc110)) {
+		dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+				"%s: panel already powered up. Do nothing.\n",
+				__func__);
+		return;
+	}
+
+	if (!enable && !is_panel_powered_on(enc110)) {
+		dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+				"%s: panel already powered down. Do nothing.\n",
+				__func__);
+		return;
+	}
+
+	/* Send VBIOS command to control eDP panel backlight */
+
+	dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+			"%s: backlight action: %s\n",
+			__func__, (enable ? "On":"Off"));
+
+	cntl.action = enable ?
+		TRANSMITTER_CONTROL_BACKLIGHT_ON :
+		TRANSMITTER_CONTROL_BACKLIGHT_OFF;
+	/*cntl.engine_id = ctx->engine;*/
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.connector_obj_id = enc110->base.connector;
+	/*todo: unhardcode*/
+	cntl.lanes_number = LANE_COUNT_FOUR;
+	cntl.hpd_sel = enc110->base.hpd_source;
+
+	/* For eDP, the following delays might need to be considered
+	 * after link training completed:
+	 * idle period - min. accounts for the required BS-Idle pattern,
+	 * max. allows for source frame synchronization;
+	 * 50 msec max. delay from valid video data from source
+	 * to video on display or backlight enable.
+	 *
+	 * Disable the delay for now.
+	 * Enable it in the future if necessary.
+	 */
+	/* dc_service_sleep_in_milliseconds(50); */
+	link_transmitter_control(enc110, &cntl);
+}
+
+static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
+{
+	uint32_t value;
+
+	REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
+	return value;
+}
+
+static void link_encoder_disable(struct dce110_link_encoder *enc110)
+{
+	/* reset training pattern */
+	REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
+			DPHY_TRAINING_PATTERN_SEL, 0);
+
+	/* reset training complete */
+	REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+
+	/* reset panel mode */
+	ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+	REG_WRITE(DP_DPHY_INTERNAL_CTRL, 0);
+}
+
+static void hpd_initialize(
+	struct dce110_link_encoder *enc110)
+{
+	/* Associate HPD with DIG_BE */
+	enum hpd_source_id hpd_source = enc110->base.hpd_source;
+
+	REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
+}
+
+bool dce110_link_encoder_validate_dvi_output(
+	const struct dce110_link_encoder *enc110,
+	enum signal_type connector_signal,
+	enum signal_type signal,
+	const struct dc_crtc_timing *crtc_timing)
+{
+	uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
+
+	if (enc110->base.features.max_pixel_clock < TMDS_MAX_PIXEL_CLOCK)
+		max_pixel_clock = enc110->base.features.max_pixel_clock;
+
+	if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+		max_pixel_clock <<= 1;
+
+	/* This handles the case of HDMI downgrade to DVI: we don't want to
+	 * cap the pixel clock if the DDI is not DVI.
+	 */
+	if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
+			connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+		max_pixel_clock = enc110->base.features.max_pixel_clock;
+
+	/* DVI only support RGB pixel encoding */
+	if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+		return false;
+
+	if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+		return false;
+
+	if (crtc_timing->pix_clk_khz > max_pixel_clock)
+		return false;
+
+	/* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+	case COLOR_DEPTH_888:
+	break;
+	case COLOR_DEPTH_101010:
+	case COLOR_DEPTH_161616:
+		if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
+			return false;
+	break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
+static bool dce110_link_encoder_validate_hdmi_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing,
+	int adjusted_pix_clk_khz)
+{
+	enum dc_color_depth max_deep_color =
+			enc110->base.features.max_hdmi_deep_color;
+
+	if (max_deep_color > enc110->base.features.max_deep_color)
+		max_deep_color = enc110->base.features.max_deep_color;
+
+	if (max_deep_color < crtc_timing->display_color_depth)
+		return false;
+
+	if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+		return false;
+
+	if ((adjusted_pix_clk_khz == 0) ||
+		(adjusted_pix_clk_khz > enc110->base.features.max_hdmi_pixel_clock) ||
+		(adjusted_pix_clk_khz > enc110->base.features.max_pixel_clock))
+		return false;
+
+	/* DCE11 HW does not support 420 */
+	if (!enc110->base.features.ycbcr420_supported &&
+			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		return false;
+
+	return true;
+}
+
+bool dce110_link_encoder_validate_rgb_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing)
+{
+	if (crtc_timing->pix_clk_khz > enc110->base.features.max_pixel_clock)
+		return false;
+
+	if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+		return false;
+
+	return true;
+}
+
+bool dce110_link_encoder_validate_dp_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing)
+{
+	/* default RGB only */
+	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+		return true;
+
+	if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
+		return true;
+
+	/* For the DP Y-only feature on DCE 8.x or later, we need the ASIC
+	 * cap plus FeatureSupportDPYonly; COLOR_DEPTH_666 is not supported */
+	if (crtc_timing->flags.Y_ONLY &&
+		enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+		crtc_timing->display_color_depth != COLOR_DEPTH_666)
+		return true;
+
+	return false;
+}
+
+bool dce110_link_encoder_validate_wireless_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing)
+{
+	if (crtc_timing->pix_clk_khz > enc110->base.features.max_pixel_clock)
+		return false;
+
+	/* Wireless only supports YCbCr444 */
+	if (crtc_timing->pixel_encoding ==
+			PIXEL_ENCODING_YCBCR444)
+		return true;
+
+	return false;
+}
+
+bool dce110_link_encoder_construct(
+	struct dce110_link_encoder *enc110,
+	const struct encoder_init_data *init_data,
+	const struct dce110_link_enc_registers *link_regs,
+	const struct dce110_link_enc_aux_registers *aux_regs,
+	const struct dce110_link_enc_hpd_registers *hpd_regs)
+{
+	enc110->base.funcs = &dce110_lnk_enc_funcs;
+	enc110->base.ctx = init_data->ctx;
+	enc110->base.id = init_data->encoder;
+
+	enc110->base.hpd_source = init_data->hpd_source;
+	enc110->base.connector = init_data->connector;
+	enc110->base.input_signals = SIGNAL_TYPE_ALL;
+
+	enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+	enc110->base.features.flags.raw = 0;
+
+	enc110->base.transmitter = init_data->transmitter;
+
+	enc110->base.features.flags.bits.IS_AUDIO_CAPABLE = true;
+
+	enc110->base.features.max_pixel_clock =
+			MAX_ENCODER_CLK;
+
+	enc110->base.features.max_deep_color = COLOR_DEPTH_121212;
+	enc110->base.features.max_hdmi_deep_color = COLOR_DEPTH_121212;
+
+	/* set the flag to indicate whether driver poll the I2C data pin
+	 * while doing the DP sink detect
+	 */
+
+/*	if (dal_adapter_service_is_feature_supported(as,
+		FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+		enc110->base.features.flags.bits.
+			DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+	enc110->base.output_signals =
+		SIGNAL_TYPE_DVI_SINGLE_LINK |
+		SIGNAL_TYPE_DVI_DUAL_LINK |
+		SIGNAL_TYPE_LVDS |
+		SIGNAL_TYPE_DISPLAY_PORT |
+		SIGNAL_TYPE_DISPLAY_PORT_MST |
+		SIGNAL_TYPE_EDP |
+		SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
+	 * and LVDS. The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
+	 * By this, adding DIGG should not hurt DCE 8.0.
+	 * This will let DCE 8.1 share DCE 8.0 code as much as possible.
+	 */
+
+	enc110->link_regs = link_regs;
+	enc110->aux_regs = aux_regs;
+	enc110->hpd_regs = hpd_regs;
+
+	switch (enc110->base.transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+		enc110->base.preferred_engine = ENGINE_ID_DIGA;
+	break;
+	case TRANSMITTER_UNIPHY_B:
+		enc110->base.preferred_engine = ENGINE_ID_DIGB;
+	break;
+	case TRANSMITTER_UNIPHY_C:
+		enc110->base.preferred_engine = ENGINE_ID_DIGC;
+	break;
+	case TRANSMITTER_UNIPHY_D:
+		enc110->base.preferred_engine = ENGINE_ID_DIGD;
+	break;
+	case TRANSMITTER_UNIPHY_E:
+		enc110->base.preferred_engine = ENGINE_ID_DIGE;
+	break;
+	case TRANSMITTER_UNIPHY_F:
+		enc110->base.preferred_engine = ENGINE_ID_DIGF;
+	break;
+	default:
+		ASSERT_CRITICAL(false);
+		enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+	}
+
+	dm_logger_write(init_data->ctx->logger, LOG_I2C_AUX,
+			"Using channel: %s [%d]\n",
+			DECODE_CHANNEL_ID(init_data->channel),
+			init_data->channel);
+
+	/* Override features with DCE-specific values */
+	{
+	struct bp_encoder_cap_info bp_cap_info = {0};
+	const struct dc_vbios_funcs *bp_funcs = enc110->base.ctx->dc_bios->funcs;
+
+	if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
+			enc110->base.ctx->dc_bios, enc110->base.id,
+			&bp_cap_info))
+		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+				bp_cap_info.DP_HBR2_CAP;
+	}
+	/* test pattern 3 support */
+	enc110->base.features.flags.bits.IS_TPS3_CAPABLE = true;
+
+	enc110->base.features.flags.bits.IS_Y_ONLY_CAPABLE = false;
+	/*
+		dal_adapter_service_is_feature_supported(as,
+			FEATURE_SUPPORT_DP_Y_ONLY);
+*/
+	enc110->base.features.flags.bits.IS_YCBCR_CAPABLE = true;
+	/*
+		dal_adapter_service_is_feature_supported(as,
+			FEATURE_SUPPORT_DP_YUV);
+			*/
+	return true;
+}
+
+bool dce110_link_encoder_validate_output_with_stream(
+	struct link_encoder *enc,
+	struct pipe_ctx *pipe_ctx)
+{
+	struct core_stream *stream = pipe_ctx->stream;
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	bool is_valid;
+
+	switch (pipe_ctx->stream->signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		is_valid = dce110_link_encoder_validate_dvi_output(
+			enc110,
+			stream->sink->link->public.connector_signal,
+			pipe_ctx->stream->signal,
+			&stream->public.timing);
+	break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		is_valid = dce110_link_encoder_validate_hdmi_output(
+				enc110,
+				&stream->public.timing,
+				stream->phy_pix_clk);
+	break;
+	case SIGNAL_TYPE_RGB:
+		is_valid = dce110_link_encoder_validate_rgb_output(
+			enc110, &stream->public.timing);
+	break;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_EDP:
+		is_valid = dce110_link_encoder_validate_dp_output(
+			enc110, &stream->public.timing);
+	break;
+	case SIGNAL_TYPE_WIRELESS:
+		is_valid = dce110_link_encoder_validate_wireless_output(
+			enc110, &stream->public.timing);
+	break;
+	default:
+		is_valid = true;
+	break;
+	}
+
+	return is_valid;
+}
+
+void dce110_link_encoder_hw_init(
+	struct link_encoder *enc)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	cntl.action = TRANSMITTER_CONTROL_INIT;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.connector_obj_id = enc110->base.connector;
+	cntl.lanes_number = LANE_COUNT_FOUR;
+	cntl.coherent = false;
+	cntl.hpd_sel = enc110->base.hpd_source;
+
+	result = link_transmitter_control(enc110, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		dm_logger_write(ctx->logger, LOG_ERROR,
+			"%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	if (enc110->base.connector.id == CONNECTOR_ID_LVDS) {
+		cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
+
+		result = link_transmitter_control(enc110, &cntl);
+
+		ASSERT(result == BP_RESULT_OK);
+
+	} else if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+		enc->funcs->power_control(&enc110->base, true);
+	}
+	aux_initialize(enc110);
+
+	/* Reinitialize HPD.
+	 * hpd_initialize() will pass the DIG_FE id to the HW context.
+	 * All other routines within the HW context will use fe_engine_offset
+	 * as the DIG_FE id even if the caller passes a DIG_FE id.
+	 * So this routine must be called first. */
+	hpd_initialize(enc110);
+}
+
+void dce110_link_encoder_destroy(struct link_encoder **enc)
+{
+	dm_free(TO_DCE110_LINK_ENC(*enc));
+	*enc = NULL;
+}
+
+void dce110_link_encoder_setup(
+	struct link_encoder *enc,
+	enum signal_type signal)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+	switch (signal) {
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		/* DP SST */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
+		break;
+	case SIGNAL_TYPE_LVDS:
+		/* LVDS */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		/* TMDS-DVI */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* TMDS-HDMI */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* DP MST */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
+		break;
+	default:
+		ASSERT_CRITICAL(false);
+		/* invalid mode ! */
+		break;
+	}
+
+}
+
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+	struct link_encoder *enc,
+	enum clock_source_id clock_source,
+	enum dc_color_depth color_depth,
+	bool hdmi,
+	bool dual_link,
+	uint32_t pixel_clock)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Enable the PHY */
+
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.pll_id = clock_source;
+	if (hdmi) {
+		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+		cntl.lanes_number = 4;
+	} else if (dual_link) {
+		cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		cntl.lanes_number = 8;
+	} else {
+		cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		cntl.lanes_number = 4;
+	}
+	cntl.hpd_sel = enc110->base.hpd_source;
+
+	cntl.pixel_clock = pixel_clock;
+	cntl.color_depth = color_depth;
+
+	result = link_transmitter_control(enc110, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		dm_logger_write(ctx->logger, LOG_ERROR,
+			"%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Enable the PHY */
+
+	/* number_of_lanes is used for pixel clock adjust,
+	 * but it's not passed to asic_control.
+	 * We need to set number of lanes manually.
+	 */
+	configure_encoder(enc110, link_settings);
+
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.pll_id = clock_source;
+	cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
+	cntl.lanes_number = link_settings->lane_count;
+	cntl.hpd_sel = enc110->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_rate
+						* LINK_RATE_REF_FREQ_IN_KHZ;
+	/* TODO: check if undefined works */
+	cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+	result = link_transmitter_control(enc110, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		dm_logger_write(ctx->logger, LOG_ERROR,
+			"%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Enable the PHY */
+
+	/* number_of_lanes is used for pixel clock adjust,
+	 * but it's not passed to asic_control.
+	 * We need to set number of lanes manually.
+	 */
+	configure_encoder(enc110, link_settings);
+
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.pll_id = clock_source;
+	cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+	cntl.lanes_number = link_settings->lane_count;
+	cntl.hpd_sel = enc110->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_rate
+						* LINK_RATE_REF_FREQ_IN_KHZ;
+	/* TODO: check if undefined works */
+	cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+	result = link_transmitter_control(enc110, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		dm_logger_write(ctx->logger, LOG_ERROR,
+			"%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+/*
+ * @brief
+ * Disable transmitter and its encoder
+ */
+void dce110_link_encoder_disable_output(
+	struct link_encoder *enc,
+	enum signal_type signal)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	if (!is_dig_enabled(enc110)) {
+		/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+		return;
+	}
+	/* Power-down RX and disable GPU PHY should be paired.
+	 * Disabling PHY without powering down RX may cause
+	 * symbol lock loss, on which we will get DP Sink interrupt. */
+
+	/* There is a case for the DP active dongles
+	 * where we want to disable the PHY but keep RX powered,
+	 * for those we need to ignore DP Sink interrupt
+	 * by checking lane count that has been set
+	 * on the last do_enable_output(). */
+
+	/* disable transmitter */
+	cntl.action = TRANSMITTER_CONTROL_DISABLE;
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.hpd_sel = enc110->base.hpd_source;
+	cntl.signal = signal;
+	cntl.connector_obj_id = enc110->base.connector;
+
+	result = link_transmitter_control(enc110, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		dm_logger_write(ctx->logger, LOG_ERROR,
+			"%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	/* disable encoder */
+	if (dc_is_dp_signal(signal))
+		link_encoder_disable(enc110);
+
+	if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+		/* power down eDP panel */
+		/* TODO: power control causes a regression; we should implement
+		 * it properly. For now just leave it commented out.
+		 *
+		 * link_encoder_edp_wait_for_hpd_ready(
+			link_enc,
+			link_enc->connector,
+			false);
+
+		 * link_encoder_edp_power_control(
+				link_enc, false); */
+	}
+}
+
+void dce110_link_encoder_dp_set_lane_settings(
+	struct link_encoder *enc,
+	const struct link_training_settings *link_settings)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	union dpcd_training_lane_set training_lane_set = { { 0 } };
+	int32_t lane = 0;
+	struct bp_transmitter_control cntl = { 0 };
+
+	if (!link_settings) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+	cntl.transmitter = enc110->base.transmitter;
+	cntl.connector_obj_id = enc110->base.connector;
+	cntl.lanes_number = link_settings->link_settings.lane_count;
+	cntl.hpd_sel = enc110->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_settings.link_rate *
+						LINK_RATE_REF_FREQ_IN_KHZ;
+
+	for (lane = 0; lane < link_settings->link_settings.lane_count; ++lane) {
+		/* translate lane settings */
+
+		training_lane_set.bits.VOLTAGE_SWING_SET =
+			link_settings->lane_settings[lane].VOLTAGE_SWING;
+		training_lane_set.bits.PRE_EMPHASIS_SET =
+			link_settings->lane_settings[lane].PRE_EMPHASIS;
+
+		/* post cursor 2 setting only applies to HBR2 link rate */
+		if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+			/* this is passed to VBIOS
+			 * to program post cursor 2 level */
+
+			training_lane_set.bits.POST_CURSOR2_SET =
+				link_settings->lane_settings[lane].POST_CURSOR2;
+		}
+
+		cntl.lane_select = lane;
+		cntl.lane_settings = training_lane_set.raw;
+
+		/* call VBIOS table to set voltage swing and pre-emphasis */
+		link_transmitter_control(enc110, &cntl);
+	}
+}
+
+/* set DP PHY test and training patterns */
+void dce110_link_encoder_dp_set_phy_pattern(
+	struct link_encoder *enc,
+	const struct encoder_set_dp_phy_pattern_param *param)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+	switch (param->dp_phy_pattern) {
+	case DP_TEST_PATTERN_TRAINING_PATTERN1:
+		dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
+		break;
+	case DP_TEST_PATTERN_TRAINING_PATTERN2:
+		dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
+		break;
+	case DP_TEST_PATTERN_TRAINING_PATTERN3:
+		dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
+		break;
+	case DP_TEST_PATTERN_D102:
+		set_dp_phy_pattern_d102(enc110);
+		break;
+	case DP_TEST_PATTERN_SYMBOL_ERROR:
+		set_dp_phy_pattern_symbol_error(enc110);
+		break;
+	case DP_TEST_PATTERN_PRBS7:
+		set_dp_phy_pattern_prbs7(enc110);
+		break;
+	case DP_TEST_PATTERN_80BIT_CUSTOM:
+		set_dp_phy_pattern_80bit_custom(
+			enc110, param->custom_pattern);
+		break;
+	case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+		set_dp_phy_pattern_hbr2_compliance(enc110);
+		break;
+	case DP_TEST_PATTERN_VIDEO_MODE: {
+		set_dp_phy_pattern_passthrough_mode(
+			enc110, param->dp_panel_mode);
+		break;
+	}
+
+	default:
+		/* invalid phy pattern */
+		ASSERT_CRITICAL(false);
+		break;
+	}
+}
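
A hedged usage sketch for the dispatcher above; the field and funcs-table names come from this file, while the caller itself is hypothetical:

    /* hypothetical caller requesting PRBS7 for a PHY measurement */
    static void request_prbs7(struct link_encoder *enc)
    {
            struct encoder_set_dp_phy_pattern_param param = {
                    .dp_phy_pattern = DP_TEST_PATTERN_PRBS7,
            };

            enc->funcs->dp_set_phy_pattern(enc, &param);
    }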
+
+static void fill_stream_allocation_row_info(
+	const struct link_mst_stream_allocation *stream_allocation,
+	uint32_t *src,
+	uint32_t *slots)
+{
+	const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
+
+	if (stream_enc) {
+		*src = stream_enc->id;
+		*slots = stream_allocation->slot_count;
+	} else {
+		*src = 0;
+		*slots = 0;
+	}
+}
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+	struct link_encoder *enc,
+	const struct link_mst_stream_allocation_table *table)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	uint32_t value0 = 0;
+	uint32_t value1 = 0;
+	uint32_t value2 = 0;
+	uint32_t slots = 0;
+	uint32_t src = 0;
+	uint32_t retries = 0;
+
+	/* For CZ there are only 3 pipes, so virtual channels go up to 3. */
+
+	/* --- Set MSE Stream Attribute -
+	 * Setup VC Payload Table on Tx Side,
+	 * Issue allocation change trigger
+	 * to commit payload on both tx and rx side */
+
+	/* we should clean up the table each time */
+
+	if (table->stream_count >= 1) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[0],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT0,
+			DP_MSE_SAT_SRC0, src,
+			DP_MSE_SAT_SLOT_COUNT0, slots);
+
+	if (table->stream_count >= 2) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[1],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT0,
+			DP_MSE_SAT_SRC1, src,
+			DP_MSE_SAT_SLOT_COUNT1, slots);
+
+	if (table->stream_count >= 3) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[2],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT1,
+			DP_MSE_SAT_SRC2, src,
+			DP_MSE_SAT_SLOT_COUNT2, slots);
+
+	if (table->stream_count >= 4) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[3],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT1,
+			DP_MSE_SAT_SRC3, src,
+			DP_MSE_SAT_SLOT_COUNT3, slots);
+
+	/* --- wait for transaction finish */
+
+	/* Send the allocation change trigger (ACT):
+	 * this step first sends the ACT,
+	 * then double-buffers the SAT into the hardware,
+	 * making the new allocation active on the DP MST link */
+
+
+	/* DP_MSE_SAT_UPDATE:
+	 * 0 - No Action
+	 * 1 - Update SAT with trigger
+	 * 2 - Update SAT without trigger */
+
+	REG_UPDATE(DP_MSE_SAT_UPDATE,
+			DP_MSE_SAT_UPDATE, 1);
+
+	/* wait for update to complete
+	 * (i.e. DP_MSE_SAT_UPDATE field is reset to 0)
+	 * then wait for the transmission
+	 * of at least 16 MTP headers on immediate local link.
+	 * i.e. DP_MSE_16_MTP_KEEPOUT field (read only) is reset to 0
+	 * a value of 1 indicates that DP MST mode
+	 * is in the 16 MTP keepout region after a VC has been added.
+	 * MST stream bandwidth (VC rate) can be configured
+	 * after this bit is cleared */
+
+	do {
+		udelay(10);
+
+		value0 = REG_READ(DP_MSE_SAT_UPDATE);
+
+		REG_GET(DP_MSE_SAT_UPDATE,
+				DP_MSE_SAT_UPDATE, &value1);
+
+		REG_GET(DP_MSE_SAT_UPDATE,
+				DP_MSE_16_MTP_KEEPOUT, &value2);
+
+		/* done once both SAT update and 16 MTP keepout have cleared */
+		if (!value1 && !value2)
+			break;
+		++retries;
+	} while (retries < DP_MST_UPDATE_MAX_RETRY);
+}
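
As a worked example (the encoder pointers, ids and slot counts below are hypothetical), a two-stream table fills SAT rows 0 and 1 and zeroes rows 2 and 3 before the update is triggered:

    struct link_mst_stream_allocation_table table = {
            .stream_count = 2,
    };

    table.stream_allocations[0].stream_enc = enc_a;  /* id 1 */
    table.stream_allocations[0].slot_count = 32;
    table.stream_allocations[1].stream_enc = enc_b;  /* id 2 */
    table.stream_allocations[1].slot_count = 20;

    dce110_link_encoder_update_mst_stream_allocation_table(enc, &table);
    /* DP_MSE_SAT0: SRC0 = 1, SLOT_COUNT0 = 32, SRC1 = 2, SLOT_COUNT1 = 20
     * DP_MSE_SAT1: SRC2 = 0, SLOT_COUNT2 = 0,  SRC3 = 0, SLOT_COUNT3 = 0 */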
+
+void dce110_link_encoder_set_lcd_backlight_level(
+	struct link_encoder *enc,
+	uint32_t level)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+	const uint32_t backlight_update_pending_max_retry = 1000;
+
+	uint32_t backlight_lock;
+
+	uint32_t i;
+	uint32_t backlight_24bit;
+	uint32_t backlight_17bit;
+	uint32_t backlight_16bit;
+	uint32_t masked_pwm_period;
+	uint8_t rounding_bit;
+	uint8_t bit_count;
+	uint64_t active_duty_cycle;
+	uint32_t pwm_period_bitcnt;
+
+	backlight_lock = REG_READ(BL_PWM_GRP1_REG_LOCK);
+
+	/*
+	 * 1. Convert 8-bit value to 17 bit U1.16 format
+	 * (1 integer, 16 fractional bits)
+	 */
+
+	/* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
+	 * effectively multiplying value by 256/255
+	 * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
+	 */
+	backlight_24bit = level * 0x10101;
+
+	/* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
+	 * used for rounding, take most significant bit of fraction for
+	 * rounding, e.g. for 0xEFEFEF, rounding bit is 1
+	 */
+	rounding_bit = (backlight_24bit >> 7) & 1;
+
+	/* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
+	 * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
+	 */
+	backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
+
+	/*
+	 * 2. Find the 16-bit backlight active duty cycle, where
+	 * 0 <= active duty cycle <= backlight period
+	 */
+
+	/* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+	 */
+	{
+		REG_GET(BL_PWM_PERIOD_CNTL,
+			BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt);
+
+		if (pwm_period_bitcnt == 0)
+			bit_count = 16;
+		else
+			bit_count = pwm_period_bitcnt;
+	}
+
+	/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+	masked_pwm_period =
+		REG_GET(BL_PWM_PERIOD_CNTL,
+				BL_PWM_PERIOD, &masked_pwm_period)
+		& ((1 << bit_count) - 1);
+
+	/* 2.2 Calculate the integer active duty cycle. The upper 16 bits
+	 * contain the integer component, the lower 16 bits the fractional
+	 * component, e.g. 0x21BDC0 = 0xEFF0 * 0x24
+	 */
+	active_duty_cycle = backlight_17bit * masked_pwm_period;
+
+	/* 2.3 Calculate the 16-bit active duty cycle from the integer and
+	 * fractional components: shift by bitCount, mask to 16 bits and add
+	 * the rounding bit from the MSB of the fraction,
+	 * e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0
+	 */
+	backlight_16bit = active_duty_cycle >> bit_count;
+	backlight_16bit &= 0xFFFF;
+	backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+	REG_UPDATE(BL_PWM_CNTL,
+			BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+	/*
+	 * 3. Program register with updated value
+	 */
+
+	/* 3.1 Lock group 2 backlight registers */
+
+	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1);
+
+	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_REG_LOCK, 1);
+
+	/* 3.2 Unlock group 2 backlight registers */
+	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_REG_LOCK, 0);
+
+	/* 3.3 Wait for pending bit to be cleared */
+	for (i = 0; i < backlight_update_pending_max_retry; ++i) {
+		REG_GET(BL_PWM_GRP1_REG_LOCK,
+						BL_PWM_GRP1_REG_UPDATE_PENDING, &backlight_lock);
+		if (!backlight_lock)
+			break;
+
+		udelay(10);
+	}
+}
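
The conversion above, condensed into a standalone reference sketch; it reproduces the step 1-2 arithmetic, and with the example values from the comments (level 0xEF, 6-bit period 0x24) it returns 0x86F7:

    #include <stdint.h>

    static uint16_t backlight_to_duty_cycle(uint8_t level,
                                            uint32_t masked_pwm_period,
                                            uint8_t bit_count)
    {
            uint32_t bl24 = level * 0x10101;                 /* 0xEF -> 0xEFEFEF */
            uint32_t bl17 = (bl24 >> 8) + ((bl24 >> 7) & 1); /* U1.16, rounded */
            uint64_t duty = (uint64_t)bl17 * masked_pwm_period;
            uint16_t out = (duty >> bit_count) & 0xFFFF;

            return out + ((duty >> (bit_count - 1)) & 1);    /* round fraction */
    }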
+
+void dce110_link_encoder_set_dmcu_backlight_level(
+	struct link_encoder *enc,
+	uint32_t level,
+	uint32_t frame_ramp,
+	uint32_t controller_id)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+	unsigned int dmcu_wait_reg_ready_interval = 100;
+	unsigned int backlight_17bit = level * 0x10101;
+	unsigned char temp_uchar =
+			(unsigned char)(((backlight_17bit & 0x80) >> 7) & 1);
+	unsigned int regValue;
+	uint32_t rampingBoundary = 0xFFFF;
+	uint32_t s2;
+
+	backlight_17bit = (backlight_17bit >> 8) + temp_uchar;
+
+	/* set ramping boundary */
+	REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+	/* setDMCUParam_Pipe */
+	REG_UPDATE_2(MASTER_COMM_CMD_REG,
+			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+			MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* waitDMCUReadyForCmd */
+	do {
+		dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
+		regValue = REG_READ(MASTER_COMM_CNTL_REG);
+		dmcu_max_retry_on_wait_reg_ready--;
+	} while
+	/* expected value is 0, loop while not 0*/
+	((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
+		dmcu_max_retry_on_wait_reg_ready > 0);
+
+	/* setDMCUParam_BL */
+	REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17bit);
+
+	/* write ramp */
+	REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp);
+
+	/* setDMCUParam_Cmd */
+	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* UpdateRequestedBacklightLevel */
+	s2 = REG_READ(BIOS_SCRATCH_2);
+
+	s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+				ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+	s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	REG_WRITE(BIOS_SCRATCH_2, s2);
+}
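
The wait-ready / write-command / raise-interrupt handshake repeats in each DMCU helper in this file. A hedged refactoring sketch (illustrative only; it leans on the file's CTX and REG_* macros and mirrors the 801 x 100 us retry budget above):

    static void dmcu_send_cmd(struct dce110_link_encoder *enc110, uint32_t cmd)
    {
            unsigned int retries = 801;

            /* wait until the DMCU has consumed the previous command */
            do {
                    dm_delay_in_microseconds(enc110->base.ctx, 100);
            } while ((REG_READ(MASTER_COMM_CNTL_REG) &
                      MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK) &&
                     --retries > 0);

            REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, cmd);
            REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
    }

For instance, dmcu_send_cmd(enc110, PSR_EXIT) could stand in for the exit path of dce110_link_encoder_set_dmcu_psr_enable() below.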
+
+void dce110_link_encoder_init_dmcu_backlight_settings(
+	struct link_encoder *enc)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	uint32_t bl_pwm_cntl;
+	uint32_t pwmCntl;
+	uint32_t pwmCntl2;
+	uint32_t periodCntl;
+	uint32_t s2;
+	uint32_t value;
+
+	bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+
+	/* It must not be 0, so we have to restore the values.
+	 * BIOS bug workaround: the period resets to zero, so restore
+	 * from the cached values, which are always correct.
+	 */
+	REG_GET(BL_PWM_CNTL,
+				BL_ACTIVE_INT_FRAC_CNT, &value);
+	if (value == 0 || bl_pwm_cntl == 1) {
+		if (stored_backlight_registers.vBL_PWM_CNTL != 0) {
+			pwmCntl = stored_backlight_registers.vBL_PWM_CNTL;
+			REG_WRITE(BL_PWM_CNTL, pwmCntl);
+
+			pwmCntl2 = stored_backlight_registers.vBL_PWM_CNTL2;
+			REG_WRITE(BL_PWM_CNTL2, pwmCntl2);
+
+			periodCntl =
+				stored_backlight_registers.vBL_PWM_PERIOD_CNTL;
+			REG_WRITE(BL_PWM_PERIOD_CNTL, periodCntl);
+
+			REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
+				BL_PWM_REF_DIV,
+				stored_backlight_registers.
+				vLVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+		}
+	} else {
+		stored_backlight_registers.vBL_PWM_CNTL =
+				REG_READ(BL_PWM_CNTL);
+		stored_backlight_registers.vBL_PWM_CNTL2 =
+				REG_READ(BL_PWM_CNTL2);
+		stored_backlight_registers.vBL_PWM_PERIOD_CNTL =
+				REG_READ(BL_PWM_PERIOD_CNTL);
+
+		REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+				&stored_backlight_registers.
+				vLVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+	}
+
+	/* Have driver take backlight control
+	 * TakeBacklightControl(true)
+	 */
+	s2 = REG_READ(BIOS_SCRATCH_2);
+	s2 |= ATOM_S2_VRI_BRIGHT_ENABLE;
+	REG_WRITE(BIOS_SCRATCH_2, s2);
+
+	/* Enable the backlight output */
+	REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+}
+
+void dce110_link_encoder_set_dmcu_abm_level(
+	struct link_encoder *enc, uint32_t level)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+
+	unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+	unsigned int dmcu_wait_reg_ready_interval = 100;
+	unsigned int regValue;
+
+	/* waitDMCUReadyForCmd */
+	do {
+		dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
+		regValue = REG_READ(MASTER_COMM_CNTL_REG);
+		dmcu_max_retry_on_wait_reg_ready--;
+		/* expected value is 0, loop while not 0 */
+	} while ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
+			dmcu_max_retry_on_wait_reg_ready > 0);
+
+	/* setDMCUParam_ABMLevel */
+	REG_UPDATE_2(MASTER_COMM_CMD_REG,
+			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+			MASTER_COMM_CMD_REG_BYTE2, level);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void get_dmcu_psr_state(struct link_encoder *enc, uint32_t *psr_state)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+
+	uint32_t count = 0;
+	uint32_t psrStateOffset = 0xf0;
+	uint32_t value;
+
+	/* Enable write access to IRAM */
+	REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
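+	/* Wait (up to ~20 us) for the DMCU IRAM to exit its low-power
+	 * state before reading from it.
+	 */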
+	do {
+		dm_delay_in_microseconds(ctx, 2);
+		REG_GET(DCI_MEM_PWR_STATUS,
+					DMCU_IRAM_MEM_PWR_STATE, &value);
+	} while (value != 0 && count++ < 10);
+
+	/* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+	REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+	/* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+	*psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+	/* Disable write access to IRAM after finished using IRAM
+	 * in order to allow dynamic sleep state
+	 */
+	REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+void dce110_link_encoder_set_dmcu_psr_enable(struct link_encoder *enc,
+								bool enable)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+
+	unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+	unsigned int dmcu_wait_reg_ready_interval = 100;
+
+	unsigned int regValue;
+
+	unsigned int retryCount;
+	uint32_t psr_state = 0;
+
+	/* waitDMCUReadyForCmd */
+	do {
+		dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
+		regValue = REG_READ(MASTER_COMM_CNTL_REG);
+		dmcu_max_retry_on_wait_reg_ready--;
+		/* expected value is 0, loop while not 0 */
+	} while ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
+			dmcu_max_retry_on_wait_reg_ready > 0);
+
+	/* setDMCUParam_Cmd */
+	if (enable)
+		REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_ENABLE);
+	else
+		REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_EXIT);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
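+	/* Poll the DMCU PSR state until it reflects the requested
+	 * enable/disable, up to ~100 retries at 10 us each.
+	 */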
+	for (retryCount = 0; retryCount <= 100; retryCount++) {
+		get_dmcu_psr_state(enc, &psr_state);
+		if (enable) {
+			if (psr_state != 0)
+				break;
+		} else {
+			if (psr_state == 0)
+				break;
+		}
+		dm_delay_in_microseconds(ctx, 10);
+	}
+}
+
+void dce110_link_encoder_setup_dmcu_psr(struct link_encoder *enc,
+			struct psr_dmcu_context *psr_context)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+
+	unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+	unsigned int dmcu_wait_reg_ready_interval = 100;
+	unsigned int regValue;
+
+	union dce110_dmcu_psr_config_data_reg1 masterCmdData1;
+	union dce110_dmcu_psr_config_data_reg2 masterCmdData2;
+	union dce110_dmcu_psr_config_data_reg3 masterCmdData3;
+
+	if (psr_context->psrExitLinkTrainingRequired) {
+		REG_UPDATE(DP_DPHY_FAST_TRAINING,
+				DPHY_RX_FAST_TRAINING_CAPABLE, 1);
+	} else {
+		REG_UPDATE(DP_DPHY_FAST_TRAINING,
+				DPHY_RX_FAST_TRAINING_CAPABLE, 0);
+		/* In DCE 11 we can pre-program a Force SR register to
+		 * trigger an SR symbol after 5 idle patterns have been
+		 * transmitted. Upon PSR exit, DMCU writes 1 to
+		 * DPHY_LOAD_BS_COUNT_START; once the internal counter
+		 * reaches DPHY_LOAD_BS_COUNT, the next BS symbol is
+		 * replaced by an SR symbol once.
+		 */
+
+		REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
+	}
+
+	/* Enable static screen interrupts for PSR supported display */
+	/* Disable the interrupt coming from other displays. */
+	REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+			STATIC_SCREEN1_INT_TO_UC_EN, 0,
+			STATIC_SCREEN2_INT_TO_UC_EN, 0,
+			STATIC_SCREEN3_INT_TO_UC_EN, 0,
+			STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+	switch (psr_context->controllerId) {
+	/* Driver uses case 1 for unconfigured */
+	case 1:
+		psr_crtc_offset = mmCRTC0_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+
+		REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+				STATIC_SCREEN1_INT_TO_UC_EN, 1);
+		break;
+	case 2:
+		psr_crtc_offset = mmCRTC1_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+
+		REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+				STATIC_SCREEN2_INT_TO_UC_EN, 1);
+		break;
+	case 3:
+		psr_crtc_offset = mmCRTC2_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+
+		REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+				STATIC_SCREEN3_INT_TO_UC_EN, 1);
+		break;
+	case 4:
+		psr_crtc_offset = mmCRTC3_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+
+		REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+				STATIC_SCREEN4_INT_TO_UC_EN, 1);
+		break;
+	case 5:
+		psr_crtc_offset = mmCRTC4_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+		/* CZ/NL only has 4 CRTCs. These are here because they are
+		 * defined in the HW regspec, but not really valid. There is
+		 * no interrupt enable mask for these instances.
+		 */
+		break;
+	case 6:
+		psr_crtc_offset = mmCRTC5_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+		/* CZ/NL only has 4 CRTCs. These are here because they are
+		 * defined in the HW regspec, but not really valid. There is
+		 * no interrupt enable mask for these instances.
+		 */
+		break;
+	default:
+		psr_crtc_offset = mmCRTC0_CRTC_STATIC_SCREEN_CONTROL -
+				mmCRTC0_CRTC_STATIC_SCREEN_CONTROL;
+
+		REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+				STATIC_SCREEN1_INT_TO_UC_EN, 1);
+		break;
+	}
+
+	REG_UPDATE_2(DP_SEC_CNTL1,
+		DP_SEC_GSP0_LINE_NUM, psr_context->sdpTransmitLineNumDeadline,
+		DP_SEC_GSP0_PRIORITY, 1);
+
+	if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+		REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+	/* waitDMCUReadyForCmd */
+	do {
+		dm_delay_in_microseconds(ctx, dmcu_wait_reg_ready_interval);
+		regValue = REG_READ(MASTER_COMM_CNTL_REG);
+		dmcu_max_retry_on_wait_reg_ready--;
+		/* expected value is 0, loop while not 0 */
+	} while ((MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK & regValue) &&
+			dmcu_max_retry_on_wait_reg_ready > 0);
+
+	/* setDMCUParam_PSRHostConfigData */
+	masterCmdData1.u32All = 0;
+	masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+	masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+	masterCmdData1.bits.rfb_update_auto_en =
+			psr_context->rfb_update_auto_en;
+	masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+	masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+	masterCmdData1.bits.phy_type  = psr_context->phyType;
+	masterCmdData1.bits.frame_cap_ind =
+			psr_context->psrFrameCaptureIndicationReq;
+	masterCmdData1.bits.aux_chan = psr_context->channel;
+	masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+	dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG1),
+					masterCmdData1.u32All);
+
+	masterCmdData2.u32All = 0;
+	masterCmdData2.bits.dig_fe = psr_context->engineId;
+	masterCmdData2.bits.dig_be = psr_context->transmitterId;
+	masterCmdData2.bits.skip_wait_for_pll_lock =
+			psr_context->skipPsrWaitForPllLock;
+	masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+	masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+	masterCmdData2.bits.num_of_controllers =
+			psr_context->numberOfControllers;
+	dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG2),
+			masterCmdData2.u32All);
+
+	masterCmdData3.u32All = 0;
+	masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+	dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG3),
+			masterCmdData3.u32All);
+
+	/* setDMCUParam_Cmd */
+	REG_UPDATE(MASTER_COMM_CMD_REG,
+			MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+	struct link_encoder *enc,
+	enum engine_id engine,
+	bool connect)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	uint32_t field;
+
+	if (engine != ENGINE_ID_UNKNOWN) {
+
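+		/* DIG_FE_SOURCE_SELECT is a bitmask of frontends feeding
+		 * this backend; set or clear the bit for this engine.
+		 */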
+		REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
+
+		if (connect)
+			field |= get_frontend_source(engine);
+		else
+			field &= ~get_frontend_source(engine);
+
+		REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
+	}
+}
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+	uint32_t hpd_enable = 0;
+	uint32_t value = dm_read_reg(ctx, addr);
+
+	hpd_enable = get_reg_field_value(value, DC_HPD_CONTROL, DC_HPD_EN);
+
+	if (hpd_enable == 0) {
+		set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
+		/* write the updated value back so the enable takes effect */
+		dm_write_reg(ctx, addr, value);
+	}
+}
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
+{
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+	struct dc_context *ctx = enc110->base.ctx;
+	uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+	uint32_t value = dm_read_reg(ctx, addr);
+
+	set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
+	/* write the updated value back so the disable takes effect */
+	dm_write_reg(ctx, addr, value);
+}

+ 363 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h

@@ -0,0 +1,363 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCE110_H__
+#define __DC_LINK_ENCODER__DCE110_H__
+
+#include "link_encoder.h"
+
+#define TO_DCE110_LINK_ENC(link_encoder)\
+	container_of(link_encoder, struct dce110_link_encoder, base)
+
+#define AUX_REG_LIST(id)\
+	SRI(AUX_CONTROL, DP_AUX, id), \
+	SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+
+#define HPD_REG_LIST(id)\
+	SRI(DC_HPD_CONTROL, HPD, id)
+
+#define LE_COMMON_REG_LIST_BASE(id) \
+	SR(BL_PWM_CNTL), \
+	SR(BL_PWM_GRP1_REG_LOCK), \
+	SR(BL_PWM_PERIOD_CNTL), \
+	SR(LVTMA_PWRSEQ_CNTL), \
+	SR(LVTMA_PWRSEQ_STATE), \
+	SR(BL_PWM_CNTL2), \
+	SR(LVTMA_PWRSEQ_REF_DIV), \
+	SR(MASTER_COMM_DATA_REG1), \
+	SR(MASTER_COMM_DATA_REG2), \
+	SR(MASTER_COMM_DATA_REG3), \
+	SR(MASTER_COMM_CMD_REG), \
+	SR(MASTER_COMM_CNTL_REG), \
+	SR(DMCU_RAM_ACCESS_CTRL), \
+	SR(DMCU_IRAM_RD_CTRL), \
+	SR(DMCU_IRAM_RD_DATA), \
+	SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+	SR(SMU_INTERRUPT_CONTROL), \
+	SRI(DIG_BE_CNTL, DIG, id), \
+	SRI(DIG_BE_EN_CNTL, DIG, id), \
+	SRI(DP_CONFIG, DP, id), \
+	SRI(DP_DPHY_CNTL, DP, id), \
+	SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+	SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+	SRI(DP_DPHY_SYM0, DP, id), \
+	SRI(DP_DPHY_SYM1, DP, id), \
+	SRI(DP_DPHY_SYM2, DP, id), \
+	SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+	SRI(DP_LINK_CNTL, DP, id), \
+	SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+	SRI(DP_MSE_SAT0, DP, id), \
+	SRI(DP_MSE_SAT1, DP, id), \
+	SRI(DP_MSE_SAT2, DP, id), \
+	SRI(DP_MSE_SAT_UPDATE, DP, id), \
+	SRI(DP_SEC_CNTL, DP, id), \
+	SRI(DP_VID_STREAM_CNTL, DP, id), \
+	SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+	SRI(DP_SEC_CNTL1, DP, id)
+
+#define LE_COMMON_REG_LIST(id)\
+	LE_COMMON_REG_LIST_BASE(id), \
+	SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+	SR(BIOS_SCRATCH_2), \
+	SR(BL1_PWM_USER_LEVEL), \
+	SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE110_REG_LIST(id)\
+	LE_COMMON_REG_LIST_BASE(id), \
+	SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+	SR(BIOS_SCRATCH_2), \
+	SR(BL1_PWM_USER_LEVEL), \
+	SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE80_REG_LIST(id)\
+	SR(BIOS_SCRATCH_2), \
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+	SR(BL1_PWM_USER_LEVEL), \
+	LE_COMMON_REG_LIST_BASE(id)
+
+struct dce110_link_enc_aux_registers {
+	uint32_t AUX_CONTROL;
+	uint32_t AUX_DPHY_RX_CONTROL0;
+};
+
+struct dce110_link_enc_hpd_registers {
+	uint32_t DC_HPD_CONTROL;
+};
+
+struct dce110_link_enc_registers {
+	/* BL registers */
+	uint32_t BL_PWM_CNTL;
+	uint32_t BL_PWM_GRP1_REG_LOCK;
+	uint32_t BL_PWM_PERIOD_CNTL;
+	uint32_t LVTMA_PWRSEQ_CNTL;
+	uint32_t LVTMA_PWRSEQ_STATE;
+	uint32_t BL_PWM_CNTL2;
+	uint32_t LVTMA_PWRSEQ_REF_DIV;
+
+	/* DMCU registers */
+	uint32_t BL1_PWM_USER_LEVEL;
+	uint32_t ABM0_BL1_PWM_USER_LEVEL;
+	uint32_t MASTER_COMM_DATA_REG1;
+	uint32_t MASTER_COMM_DATA_REG2;
+	uint32_t MASTER_COMM_DATA_REG3;
+	uint32_t MASTER_COMM_CMD_REG;
+	uint32_t MASTER_COMM_CNTL_REG;
+	uint32_t BIOS_SCRATCH_2;
+	uint32_t DMCU_RAM_ACCESS_CTRL;
+	uint32_t DCI_MEM_PWR_STATUS;
+	uint32_t DMU_MEM_PWR_CNTL;
+	uint32_t DMCU_IRAM_RD_CTRL;
+	uint32_t DMCU_IRAM_RD_DATA;
+	uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+	uint32_t SMU_INTERRUPT_CONTROL;
+
+	/* Common DP registers */
+	uint32_t DIG_BE_CNTL;
+	uint32_t DIG_BE_EN_CNTL;
+	uint32_t DP_CONFIG;
+	uint32_t DP_DPHY_CNTL;
+	uint32_t DP_DPHY_INTERNAL_CTRL;
+	uint32_t DP_DPHY_PRBS_CNTL;
+	uint32_t DP_DPHY_SCRAM_CNTL;
+	uint32_t DP_DPHY_SYM0;
+	uint32_t DP_DPHY_SYM1;
+	uint32_t DP_DPHY_SYM2;
+	uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
+	uint32_t DP_LINK_CNTL;
+	uint32_t DP_LINK_FRAMING_CNTL;
+	uint32_t DP_MSE_SAT0;
+	uint32_t DP_MSE_SAT1;
+	uint32_t DP_MSE_SAT2;
+	uint32_t DP_MSE_SAT_UPDATE;
+	uint32_t DP_SEC_CNTL;
+	uint32_t DP_VID_STREAM_CNTL;
+	uint32_t DP_DPHY_FAST_TRAINING;
+	uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
+	uint32_t DP_SEC_CNTL1;
+};
+
+struct dce110_link_encoder {
+	struct link_encoder base;
+	const struct dce110_link_enc_registers *link_regs;
+	const struct dce110_link_enc_aux_registers *aux_regs;
+	const struct dce110_link_enc_hpd_registers *hpd_regs;
+};
+
+/*******************************************************************
+*   MASTER_COMM_DATA_REG1   Bit position    Data
+*                           7:0	            hyst_frames[7:0]
+*                           14:8	        hyst_lines[6:0]
+*                           15	            RFB_UPDATE_AUTO_EN
+*                           18:16	        phy_num[2:0]
+*                           21:19	        dcp_sel[2:0]
+*                           22	            phy_type
+*                           23	            frame_cap_ind
+*                           26:24	        aux_chan[2:0]
+*                           30:27	        aux_repeat[3:0]
+*                           31:31	        reserved[31:31]
+*******************************************************************/
+union dce110_dmcu_psr_config_data_reg1 {
+	struct {
+		unsigned int timehyst_frames:8;    /*[7:0]*/
+		unsigned int hyst_lines:7;         /*[14:8]*/
+		unsigned int rfb_update_auto_en:1; /*[15:15]*/
+		unsigned int dp_port_num:3;        /*[18:16]*/
+		unsigned int dcp_sel:3;            /*[21:19]*/
+		unsigned int phy_type:1;           /*[22:22]*/
+		unsigned int frame_cap_ind:1;      /*[23:23]*/
+		unsigned int aux_chan:3;           /*[26:24]*/
+		unsigned int aux_repeat:4;         /*[30:27]*/
+		unsigned int reserved:1;           /*[31:31]*/
+	} bits;
+	unsigned int u32All;
+};
+
+/*******************************************************************
+*   MASTER_COMM_DATA_REG2
+*******************************************************************/
+union dce110_dmcu_psr_config_data_reg2 {
+	struct {
+		unsigned int dig_fe:3;                  /*[2:0]*/
+		unsigned int dig_be:3;                  /*[5:3]*/
+		unsigned int skip_wait_for_pll_lock:1;  /*[6:6]*/
+		unsigned int reserved:9;                /*[15:7]*/
+		unsigned int frame_delay:8;             /*[23:16]*/
+		unsigned int smu_phy_id:4;              /*[27:24]*/
+		unsigned int num_of_controllers:4;      /*[31:28]*/
+	} bits;
+	unsigned int u32All;
+};
+
+/*******************************************************************
+*   MASTER_COMM_DATA_REG3
+*******************************************************************/
+union dce110_dmcu_psr_config_data_reg3 {
+	struct {
+		unsigned int psr_level:16;      /*[15:0]*/
+		unsigned int link_rate:4;       /*[19:16]*/
+		unsigned int reserved:12;       /*[31:20]*/
+	} bits;
+	unsigned int u32All;
+};
+
+struct dce110_abm_backlight_registers {
+	unsigned int vBL_PWM_CNTL;
+	unsigned int vBL_PWM_CNTL2;
+	unsigned int vBL_PWM_PERIOD_CNTL;
+	unsigned int vLVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+bool dce110_link_encoder_construct(
+	struct dce110_link_encoder *enc110,
+	const struct encoder_init_data *init_data,
+	const struct dce110_link_enc_registers *link_regs,
+	const struct dce110_link_enc_aux_registers *aux_regs,
+	const struct dce110_link_enc_hpd_registers *hpd_regs);
+
+bool dce110_link_encoder_validate_dvi_output(
+	const struct dce110_link_encoder *enc110,
+	enum signal_type connector_signal,
+	enum signal_type signal,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_rgb_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_dp_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_wireless_output(
+	const struct dce110_link_encoder *enc110,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_output_with_stream(
+	struct link_encoder *enc,
+	struct pipe_ctx *pipe_ctx);
+
+/****************** HW programming ************************/
+
+/* initialize HW */  /* why do we initialize aux in here? */
+void dce110_link_encoder_hw_init(struct link_encoder *enc);
+
+void dce110_link_encoder_destroy(struct link_encoder **enc);
+
+/* program DIG_MODE in DIG_BE */
+/* TODO can this be combined with enable_output? */
+void dce110_link_encoder_setup(
+	struct link_encoder *enc,
+	enum signal_type signal);
+
+/* enables TMDS PHY output */
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+	struct link_encoder *enc,
+	enum clock_source_id clock_source,
+	enum dc_color_depth color_depth,
+	bool hdmi,
+	bool dual_link,
+	uint32_t pixel_clock);
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source);
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source);
+
+/* disable PHY output */
+void dce110_link_encoder_disable_output(
+	struct link_encoder *link_enc,
+	enum signal_type signal);
+
+/* set DP lane settings */
+void dce110_link_encoder_dp_set_lane_settings(
+	struct link_encoder *enc,
+	const struct link_training_settings *link_settings);
+
+void dce110_link_encoder_dp_set_phy_pattern(
+	struct link_encoder *enc,
+	const struct encoder_set_dp_phy_pattern_param *param);
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+	struct link_encoder *enc,
+	const struct link_mst_stream_allocation_table *table);
+
+void dce110_link_encoder_set_lcd_backlight_level(
+	struct link_encoder *enc,
+	uint32_t level);
+
+void dce110_link_encoder_set_dmcu_backlight_level(
+	struct link_encoder *enc,
+	uint32_t level,
+	uint32_t frame_ramp,
+	uint32_t controller_id);
+
+void dce110_link_encoder_init_dmcu_backlight_settings(
+	struct link_encoder *enc);
+
+void dce110_link_encoder_set_dmcu_abm_level(
+	struct link_encoder *enc,
+	uint32_t level);
+
+void dce110_link_encoder_set_dmcu_psr_enable(
+		struct link_encoder *enc, bool enable);
+
+void dce110_link_encoder_setup_dmcu_psr(struct link_encoder *enc,
+			struct psr_dmcu_context *psr_context);
+
+void dce110_link_encoder_edp_backlight_control(
+	struct link_encoder *enc,
+	bool enable);
+
+void dce110_link_encoder_edp_power_control(
+	struct link_encoder *enc,
+	bool power_up);
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+	struct link_encoder *enc,
+	enum engine_id engine,
+	bool connect);
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+	struct link_encoder *enc,
+	uint32_t index);
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc);
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc);
+
+#endif /* __DC_LINK_ENCODER__DCE110_H__ */

+ 384 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c

@@ -0,0 +1,384 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "mem_input.h"
+#include "reg_helper.h"
+
+#define CTX \
+	mi->ctx
+#define REG(reg)\
+	mi->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+	mi->shifts->field_name, mi->masks->field_name
+
+
+static void program_urgency_watermark(struct mem_input *mi,
+	uint32_t wm_select,
+	uint32_t urgency_low_wm,
+	uint32_t urgency_high_wm)
+{
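+	/* Select which of the four watermark sets (A-D) the writes below
+	 * target, then program the low/high urgency marks for it.
+	 */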
+	REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+		URGENCY_WATERMARK_MASK, wm_select);
+
+	REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+		URGENCY_LOW_WATERMARK, urgency_low_wm,
+		URGENCY_HIGH_WATERMARK, urgency_high_wm);
+}
+
+static void program_nbp_watermark(struct mem_input *mi,
+	uint32_t wm_select,
+	uint32_t nbp_wm)
+{
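+	/* The NB p-state change registers are not present on every DCE
+	 * variant; the register offset is 0 in that case, so skip them.
+	 */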
+	if (REG(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL)) {
+		REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+				NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+		REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+				NB_PSTATE_CHANGE_ENABLE, 1,
+				NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+				NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+		REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+				NB_PSTATE_CHANGE_WATERMARK, nbp_wm);
+	}
+}
+
+static void program_stutter_watermark(struct mem_input *mi,
+	uint32_t wm_select,
+	uint32_t stutter_mark)
+{
+	REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+		STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+	REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+			STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+}
+
+void dce_mem_input_program_display_marks(struct mem_input *mi,
+	struct bw_watermarks nbp,
+	struct bw_watermarks stutter,
+	struct bw_watermarks urgent,
+	uint32_t total_dest_line_time_ns)
+{
+	uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+	program_urgency_watermark(mi, 0, /* set a */
+			urgent.a_mark, total_dest_line_time_ns);
+	program_urgency_watermark(mi, 1, /* set b */
+			urgent.b_mark, total_dest_line_time_ns);
+	program_urgency_watermark(mi, 2, /* set c */
+			urgent.c_mark, total_dest_line_time_ns);
+	program_urgency_watermark(mi, 3, /* set d */
+			urgent.d_mark, total_dest_line_time_ns);
+
+	REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+		STUTTER_ENABLE, stutter_en,
+		STUTTER_IGNORE_FBC, 1);
+	program_nbp_watermark(mi, 0, nbp.a_mark); /* set a */
+	program_nbp_watermark(mi, 1, nbp.b_mark); /* set b */
+	program_nbp_watermark(mi, 2, nbp.c_mark); /* set c */
+	program_nbp_watermark(mi, 3, nbp.d_mark); /* set d */
+
+	program_stutter_watermark(mi, 0, stutter.a_mark); /* set a */
+	program_stutter_watermark(mi, 1, stutter.b_mark); /* set b */
+	program_stutter_watermark(mi, 2, stutter.c_mark); /* set c */
+	program_stutter_watermark(mi, 3, stutter.d_mark); /* set d */
+}
+
+static void program_tiling(struct mem_input *mi,
+	const union dc_tiling_info *info)
+{
+	if (mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */
+		REG_UPDATE_9(GRPH_CONTROL,
+				GRPH_NUM_BANKS, info->gfx8.num_banks,
+				GRPH_BANK_WIDTH, info->gfx8.bank_width,
+				GRPH_BANK_HEIGHT, info->gfx8.bank_height,
+				GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect,
+				GRPH_TILE_SPLIT, info->gfx8.tile_split,
+				GRPH_MICRO_TILE_MODE, info->gfx8.tile_mode,
+				GRPH_PIPE_CONFIG, info->gfx8.pipe_config,
+				GRPH_ARRAY_MODE, info->gfx8.array_mode,
+				GRPH_COLOR_EXPANSION_MODE, 1);
+		/* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */
+	}
+}
+
+static void program_size_and_rotation(
+	struct mem_input *mi,
+	enum dc_rotation_angle rotation,
+	const union plane_size *plane_size)
+{
+	const struct rect *in_rect = &plane_size->grph.surface_size;
+	struct rect hw_rect = plane_size->grph.surface_size;
+	const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = {
+			[ROTATION_ANGLE_0] = 0,
+			[ROTATION_ANGLE_90] = 1,
+			[ROTATION_ANGLE_180] = 2,
+			[ROTATION_ANGLE_270] = 3,
+	};
+
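+	/* For 90/270 degree rotation the surface is scanned transposed,
+	 * so swap x/y and width/height before programming.
+	 */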
+	if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) {
+		hw_rect.x = in_rect->y;
+		hw_rect.y = in_rect->x;
+
+		hw_rect.height = in_rect->width;
+		hw_rect.width = in_rect->height;
+	}
+
+	REG_SET(GRPH_X_START, 0,
+			GRPH_X_START, hw_rect.x);
+
+	REG_SET(GRPH_Y_START, 0,
+			GRPH_Y_START, hw_rect.y);
+
+	REG_SET(GRPH_X_END, 0,
+			GRPH_X_END, hw_rect.width);
+
+	REG_SET(GRPH_Y_END, 0,
+			GRPH_Y_END, hw_rect.height);
+
+	REG_SET(GRPH_PITCH, 0,
+			GRPH_PITCH, plane_size->grph.surface_pitch);
+
+	REG_SET(HW_ROTATION, 0,
+			GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
+}
+
+static void program_grph_pixel_format(
+	struct mem_input *mi,
+	enum surface_pixel_format format)
+{
+	uint32_t red_xbar = 0, blue_xbar = 0; /* no swap */
+	/* default to 0 so an unsupported format does not use garbage */
+	uint32_t grph_depth = 0, grph_format = 0;
+	uint32_t sign = 0, floating = 0;
+
+	if (format == SURFACE_PIXEL_FORMAT_GRPH_BGRA8888 ||
+			/*todo: doesn't look like we handle BGRA here,
+			 *  should problem swap endian*/
+		format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
+		format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
+		format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+		/* ABGR formats */
+		red_xbar = 2;
+		blue_xbar = 2;
+	}
+
+	REG_SET_2(GRPH_SWAP_CNTL, 0,
+			GRPH_RED_CROSSBAR, red_xbar,
+			GRPH_BLUE_CROSSBAR, blue_xbar);
+
+	switch (format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+		grph_depth = 0;
+		grph_format = 0;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+		grph_depth = 1;
+		grph_format = 0;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		grph_depth = 1;
+		grph_format = 1;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
+		grph_depth = 2;
+		grph_format = 0;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+		grph_depth = 2;
+		grph_format = 1;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		sign = 1;
+		floating = 1;
+		/* no break */
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+		grph_depth = 3;
+		grph_format = 0;
+		break;
+	default:
+		DC_ERR("unsupported grph pixel format");
+		break;
+	}
+
+	REG_UPDATE_2(GRPH_CONTROL,
+			GRPH_DEPTH, grph_depth,
+			GRPH_FORMAT, grph_format);
+
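+	/* The FP16 case above enables the prescaler (GRPH_PRESCALE_SELECT)
+	 * and flags every component as signed.
+	 */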
+	REG_UPDATE_4(PRESCALE_GRPH_CONTROL,
+			GRPH_PRESCALE_SELECT, floating,
+			GRPH_PRESCALE_R_SIGN, sign,
+			GRPH_PRESCALE_G_SIGN, sign,
+			GRPH_PRESCALE_B_SIGN, sign);
+}
+
+bool dce_mem_input_program_surface_config(struct mem_input *mi,
+	enum surface_pixel_format format,
+	union dc_tiling_info *tiling_info,
+	union plane_size *plane_size,
+	enum dc_rotation_angle rotation,
+	struct dc_plane_dcc_param *dcc,
+	bool horizontal_mirror)
+{
+	REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
+
+	program_tiling(mi, tiling_info);
+	program_size_and_rotation(mi, rotation, plane_size);
+
+	if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
+		format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+		program_grph_pixel_format(mi, format);
+
+	return true;
+}
+
+static uint32_t get_dmif_switch_time_us(
+	uint32_t h_total,
+	uint32_t v_total,
+	uint32_t pix_clk_khz)
+{
+	uint32_t frame_time;
+	uint32_t pixels_per_second;
+	uint32_t pixels_per_frame;
+	uint32_t refresh_rate;
+	const uint32_t us_in_sec = 1000000;
+	const uint32_t min_single_frame_time_us = 30000;
+	/*return double of frame time*/
+	const uint32_t single_frame_time_multiplier = 2;
+
+	if (!h_total || !v_total || !pix_clk_khz)
+		return single_frame_time_multiplier * min_single_frame_time_us;
+
+	/*TODO: should we use pixel format normalized pixel clock here?*/
+	pixels_per_second = pix_clk_khz * 1000;
+	pixels_per_frame = h_total * v_total;
+
+	if (!pixels_per_second || !pixels_per_frame) {
+		/* avoid division by zero */
+		ASSERT(pixels_per_frame);
+		ASSERT(pixels_per_second);
+		return single_frame_time_multiplier * min_single_frame_time_us;
+	}
+
+	refresh_rate = pixels_per_second / pixels_per_frame;
+
+	if (!refresh_rate) {
+		/* avoid division by zero*/
+		ASSERT(refresh_rate);
+		return single_frame_time_multiplier * min_single_frame_time_us;
+	}
+
+	frame_time = us_in_sec / refresh_rate;
+
+	if (frame_time < min_single_frame_time_us)
+		frame_time = min_single_frame_time_us;
+
+	frame_time *= single_frame_time_multiplier;
+
+	return frame_time;
+}
+
+void dce_mem_input_allocate_dmif(struct mem_input *mi,
+	uint32_t h_total,
+	uint32_t v_total,
+	uint32_t pix_clk_khz,
+	uint32_t total_stream_num)
+{
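+	/* Poll budget: roughly two frame times' worth (see
+	 * get_dmif_switch_time_us) of retry_delay-microsecond polls.
+	 */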
+	const uint32_t retry_delay = 10;
+	uint32_t retry_count = get_dmif_switch_time_us(
+			h_total,
+			v_total,
+			pix_clk_khz) / retry_delay;
+
+	uint32_t pix_dur;
+	uint32_t buffers_allocated;
+	uint32_t dmif_buffer_control;
+
+	dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+			DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+	if (buffers_allocated == 2)
+		return;
+
+	REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+			DMIF_BUFFERS_ALLOCATED, 2);
+
+	REG_WAIT(DMIF_BUFFER_CONTROL,
+			DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+			retry_delay, retry_count);
+
+	if (pix_clk_khz != 0) {
+		pix_dur = 1000000000ULL / pix_clk_khz;
+
+		REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL1,
+			PIXEL_DURATION, pix_dur);
+	}
+
+	if (mi->wa.single_head_rdreq_dmif_limit) {
+		uint32_t enable = (total_stream_num > 1) ? 0 :
+				mi->wa.single_head_rdreq_dmif_limit;
+
+		REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+				ENABLE, enable);
+	}
+}
+
+void dce_mem_input_free_dmif(struct mem_input *mi,
+		uint32_t total_stream_num)
+{
+	uint32_t buffers_allocated;
+	uint32_t dmif_buffer_control;
+
+	dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+			DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+	if (buffers_allocated == 0)
+		return;
+
+	REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+			DMIF_BUFFERS_ALLOCATED, 0);
+
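+	/* Wait up to 0xBB8 (3000) retries * 10 us = 30 ms for the
+	 * de-allocation to complete.
+	 */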
+	REG_WAIT(DMIF_BUFFER_CONTROL,
+			DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+			10, 0xBB8);
+
+	if (mi->wa.single_head_rdreq_dmif_limit) {
+		uint32_t enable = (total_stream_num > 1) ? 0 :
+				mi->wa.single_head_rdreq_dmif_limit;
+
+		REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+				ENABLE, enable);
+	}
+}

+ 217 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h

@@ -0,0 +1,217 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_MEM_INPUT_H__
+#define __DCE_MEM_INPUT_H__
+
+#define MI_DCE_BASE_REG_LIST(id)\
+	SRI(GRPH_ENABLE, DCP, id),\
+	SRI(GRPH_CONTROL, DCP, id),\
+	SRI(GRPH_X_START, DCP, id),\
+	SRI(GRPH_Y_START, DCP, id),\
+	SRI(GRPH_X_END, DCP, id),\
+	SRI(GRPH_Y_END, DCP, id),\
+	SRI(GRPH_PITCH, DCP, id),\
+	SRI(HW_ROTATION, DCP, id),\
+	SRI(GRPH_SWAP_CNTL, DCP, id),\
+	SRI(PRESCALE_GRPH_CONTROL, DCP, id),\
+	SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\
+	SRI(DPG_WATERMARK_MASK_CONTROL, DMIF_PG, id),\
+	SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\
+	SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\
+	SRI(DMIF_BUFFER_CONTROL, PIPE, id)
+
+#define MI_REG_LIST(id)\
+	MI_DCE_BASE_REG_LIST(id),\
+	SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id)
+
+struct dce_mem_input_registers {
+	/* DCP */
+	uint32_t GRPH_ENABLE;
+	uint32_t GRPH_CONTROL;
+	uint32_t GRPH_X_START;
+	uint32_t GRPH_Y_START;
+	uint32_t GRPH_X_END;
+	uint32_t GRPH_Y_END;
+	uint32_t GRPH_PITCH;
+	uint32_t HW_ROTATION;
+	uint32_t GRPH_SWAP_CNTL;
+	uint32_t PRESCALE_GRPH_CONTROL;
+	/* DMIF_PG */
+	uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
+	uint32_t DPG_WATERMARK_MASK_CONTROL;
+	uint32_t DPG_PIPE_URGENCY_CONTROL;
+	uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
+	uint32_t DPG_PIPE_LOW_POWER_CONTROL;
+	uint32_t DPG_PIPE_STUTTER_CONTROL;
+	uint32_t DPG_PIPE_STUTTER_CONTROL2;
+	/* DCI */
+	uint32_t DMIF_BUFFER_CONTROL;
+	/* MC_HUB */
+	uint32_t MC_HUB_RDREQ_DMIF_LIMIT;
+};
+
+/* Set_Field_for_Block */
+#define SFB(blk_name, reg_name, field_name, post_fix)\
+	.field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\
+	SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCP_MASK_SH_LIST(mask_sh, blk)\
+	SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+	SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+	SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
+	SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
+	SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
+	SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\
+	SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\
+	SFB(blk, HW_ROTATION, GRPH_ROTATION_ANGLE, mask_sh),\
+	SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\
+	SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\
+	SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\
+	SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\
+	SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\
+	SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+	SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
+	SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\
+	SFB(blk, DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\
+	SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\
+	SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\
+	SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\
+	SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_DCE(mask_sh, blk)\
+	SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+	SFB(blk, DPG_WATERMARK_MASK_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+	SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\
+	SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+	SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+	SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_DCE_MASK_SH_LIST(mask_sh)\
+	MI_DCP_MASK_SH_LIST(mask_sh,),\
+	MI_DMIF_PG_MASK_SH_LIST(mask_sh,),\
+	MI_DMIF_PG_MASK_SH_DCE(mask_sh,),\
+	MI_GFX8_TILE_MASK_SH_LIST(mask_sh,)
+
+#define MI_REG_FIELD_LIST(type) \
+	type GRPH_ENABLE; \
+	type GRPH_X_START; \
+	type GRPH_Y_START; \
+	type GRPH_X_END; \
+	type GRPH_Y_END; \
+	type GRPH_PITCH; \
+	type GRPH_ROTATION_ANGLE; \
+	type GRPH_RED_CROSSBAR; \
+	type GRPH_BLUE_CROSSBAR; \
+	type GRPH_PRESCALE_SELECT; \
+	type GRPH_PRESCALE_R_SIGN; \
+	type GRPH_PRESCALE_G_SIGN; \
+	type GRPH_PRESCALE_B_SIGN; \
+	type GRPH_DEPTH; \
+	type GRPH_FORMAT; \
+	type GRPH_NUM_BANKS; \
+	type GRPH_BANK_WIDTH;\
+	type GRPH_BANK_HEIGHT;\
+	type GRPH_MACRO_TILE_ASPECT;\
+	type GRPH_TILE_SPLIT;\
+	type GRPH_MICRO_TILE_MODE;\
+	type GRPH_PIPE_CONFIG;\
+	type GRPH_ARRAY_MODE;\
+	type GRPH_COLOR_EXPANSION_MODE;\
+	type GRPH_SW_MODE; \
+	type GRPH_NUM_SHADER_ENGINES; \
+	type GRPH_NUM_PIPES; \
+	type PIXEL_DURATION; \
+	type URGENCY_WATERMARK_MASK; \
+	type PSTATE_CHANGE_WATERMARK_MASK; \
+	type NB_PSTATE_CHANGE_WATERMARK_MASK; \
+	type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
+	type URGENCY_LOW_WATERMARK; \
+	type URGENCY_HIGH_WATERMARK; \
+	type NB_PSTATE_CHANGE_ENABLE; \
+	type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+	type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+	type NB_PSTATE_CHANGE_WATERMARK; \
+	type PSTATE_CHANGE_ENABLE; \
+	type PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+	type PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+	type PSTATE_CHANGE_WATERMARK; \
+	type STUTTER_ENABLE; \
+	type STUTTER_IGNORE_FBC; \
+	type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
+	type DMIF_BUFFERS_ALLOCATED; \
+	type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
+	type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */
+
+struct dce_mem_input_shift {
+	MI_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_mem_input_mask {
+	MI_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_mem_input_wa {
+	uint8_t single_head_rdreq_dmif_limit;
+};
+
+struct mem_input;
+bool dce_mem_input_program_surface_config(struct mem_input *mi,
+	enum surface_pixel_format format,
+	union dc_tiling_info *tiling_info,
+	union plane_size *plane_size,
+	enum dc_rotation_angle rotation,
+	struct dc_plane_dcc_param *dcc,
+	bool horizontal_mirror);
+
+void dce_mem_input_allocate_dmif(struct mem_input *mi,
+	uint32_t h_total,
+	uint32_t v_total,
+	uint32_t pix_clk_khz,
+	uint32_t total_stream_num);
+
+void dce_mem_input_free_dmif(struct mem_input *mi,
+	uint32_t total_stream_num);
+
+void dce_mem_input_program_display_marks(struct mem_input *mi,
+	struct bw_watermarks nbp,
+	struct bw_watermarks stutter,
+	struct bw_watermarks urgent,
+	uint32_t total_dest_line_time_ns);
+
+#endif /*__DCE_MEM_INPUT_H__*/

+ 501 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c

@@ -0,0 +1,501 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "transform.h"
+
+const uint16_t filter_2tap_16p[18] = {
+	4096, 0,
+	3840, 256,
+	3584, 512,
+	3328, 768,
+	3072, 1024,
+	2816, 1280,
+	2560, 1536,
+	2304, 1792,
+	2048, 2048
+};
+
+const uint16_t filter_3tap_16p_upscale[27] = {
+	2048, 2048, 0,
+	1708, 2424, 16348,
+	1372, 2796, 16308,
+	1056, 3148, 16272,
+	768, 3464, 16244,
+	512, 3728, 16236,
+	296, 3928, 16252,
+	124, 4052, 16296,
+	0, 4096, 0
+};
+
+const uint16_t filter_3tap_16p_117[27] = {
+	2048, 2048, 0,
+	1824, 2276, 16376,
+	1600, 2496, 16380,
+	1376, 2700, 16,
+	1156, 2880, 52,
+	948, 3032, 108,
+	756, 3144, 192,
+	580, 3212, 296,
+	428, 3236, 428
+};
+
+const uint16_t filter_3tap_16p_150[27] = {
+	2048, 2048, 0,
+	1872, 2184, 36,
+	1692, 2308, 88,
+	1516, 2420, 156,
+	1340, 2516, 236,
+	1168, 2592, 328,
+	1004, 2648, 440,
+	844, 2684, 560,
+	696, 2696, 696
+};
+
+const uint16_t filter_3tap_16p_183[27] = {
+	2048, 2048, 0,
+	1892, 2104, 92,
+	1744, 2152, 196,
+	1592, 2196, 300,
+	1448, 2232, 412,
+	1304, 2256, 528,
+	1168, 2276, 648,
+	1032, 2288, 772,
+	900, 2292, 900
+};
+
+const uint16_t filter_4tap_16p_upscale[36] = {
+	0, 4096, 0, 0,
+	16240, 4056, 180, 16380,
+	16136, 3952, 404, 16364,
+	16072, 3780, 664, 16344,
+	16040, 3556, 952, 16312,
+	16036, 3284, 1268, 16272,
+	16052, 2980, 1604, 16224,
+	16084, 2648, 1952, 16176,
+	16128, 2304, 2304, 16128
+};
+
+const uint16_t filter_4tap_16p_117[36] = {
+	428, 3236, 428, 0,
+	276, 3232, 604, 16364,
+	148, 3184, 800, 16340,
+	44, 3104, 1016, 16312,
+	16344, 2984, 1244, 16284,
+	16284, 2832, 1488, 16256,
+	16244, 2648, 1732, 16236,
+	16220, 2440, 1976, 16220,
+	16212, 2216, 2216, 16212
+};
+
+const uint16_t filter_4tap_16p_150[36] = {
+	696, 2700, 696, 0,
+	560, 2700, 848, 16364,
+	436, 2676, 1008, 16348,
+	328, 2628, 1180, 16336,
+	232, 2556, 1356, 16328,
+	152, 2460, 1536, 16328,
+	84, 2344, 1716, 16332,
+	28, 2208, 1888, 16348,
+	16376, 2052, 2052, 16376
+};
+
+const uint16_t filter_4tap_16p_183[36] = {
+	940, 2208, 940, 0,
+	832, 2200, 1052, 4,
+	728, 2180, 1164, 16,
+	628, 2148, 1280, 36,
+	536, 2100, 1392, 60,
+	448, 2044, 1504, 92,
+	368, 1976, 1612, 132,
+	296, 1900, 1716, 176,
+	232, 1812, 1812, 232
+};
+
+const uint16_t filter_2tap_64p[66] = {
+	4096, 0,
+	4032, 64,
+	3968, 128,
+	3904, 192,
+	3840, 256,
+	3776, 320,
+	3712, 384,
+	3648, 448,
+	3584, 512,
+	3520, 576,
+	3456, 640,
+	3392, 704,
+	3328, 768,
+	3264, 832,
+	3200, 896,
+	3136, 960,
+	3072, 1024,
+	3008, 1088,
+	2944, 1152,
+	2880, 1216,
+	2816, 1280,
+	2752, 1344,
+	2688, 1408,
+	2624, 1472,
+	2560, 1536,
+	2496, 1600,
+	2432, 1664,
+	2368, 1728,
+	2304, 1792,
+	2240, 1856,
+	2176, 1920,
+	2112, 1984,
+	2048, 2048 };
+
+const uint16_t filter_3tap_64p_upscale[99] = {
+	2048, 2048, 0,
+	1960, 2140, 16376,
+	1876, 2236, 16364,
+	1792, 2328, 16356,
+	1708, 2424, 16348,
+	1620, 2516, 16336,
+	1540, 2612, 16328,
+	1456, 2704, 16316,
+	1372, 2796, 16308,
+	1292, 2884, 16296,
+	1212, 2976, 16288,
+	1136, 3060, 16280,
+	1056, 3148, 16272,
+	984, 3228, 16264,
+	908, 3312, 16256,
+	836, 3388, 16248,
+	768, 3464, 16244,
+	700, 3536, 16240,
+	636, 3604, 16236,
+	572, 3668, 16236,
+	512, 3728, 16236,
+	456, 3784, 16236,
+	400, 3836, 16240,
+	348, 3884, 16244,
+	296, 3928, 16252,
+	252, 3964, 16260,
+	204, 4000, 16268,
+	164, 4028, 16284,
+	124, 4052, 16296,
+	88, 4072, 16316,
+	56, 4084, 16336,
+	24, 4092, 16356,
+	0, 4096, 0
+};
+
+const uint16_t filter_3tap_64p_117[99] = {
+	2048, 2048, 0,
+	1992, 2104, 16380,
+	1936, 2160, 16380,
+	1880, 2220, 16376,
+	1824, 2276, 16376,
+	1768, 2332, 16376,
+	1712, 2388, 16376,
+	1656, 2444, 16376,
+	1600, 2496, 16380,
+	1544, 2548, 0,
+	1488, 2600, 4,
+	1432, 2652, 8,
+	1376, 2700, 16,
+	1320, 2748, 20,
+	1264, 2796, 32,
+	1212, 2840, 40,
+	1156, 2880, 52,
+	1104, 2920, 64,
+	1052, 2960, 80,
+	1000, 2996, 92,
+	948, 3032, 108,
+	900, 3060, 128,
+	852, 3092, 148,
+	804, 3120, 168,
+	756, 3144, 192,
+	712, 3164, 216,
+	668, 3184, 240,
+	624, 3200, 268,
+	580, 3212, 296,
+	540, 3220, 328,
+	500, 3228, 360,
+	464, 3232, 392,
+	428, 3236, 428
+};
+
+const uint16_t filter_3tap_64p_150[99] = {
+	2048, 2048, 0,
+	2004, 2080, 8,
+	1960, 2116, 16,
+	1916, 2148, 28,
+	1872, 2184, 36,
+	1824, 2216, 48,
+	1780, 2248, 60,
+	1736, 2280, 76,
+	1692, 2308, 88,
+	1648, 2336, 104,
+	1604, 2368, 120,
+	1560, 2392, 136,
+	1516, 2420, 156,
+	1472, 2444, 172,
+	1428, 2472, 192,
+	1384, 2492, 212,
+	1340, 2516, 236,
+	1296, 2536, 256,
+	1252, 2556, 280,
+	1212, 2576, 304,
+	1168, 2592, 328,
+	1124, 2608, 356,
+	1084, 2624, 384,
+	1044, 2636, 412,
+	1004, 2648, 440,
+	964, 2660, 468,
+	924, 2668, 500,
+	884, 2676, 528,
+	844, 2684, 560,
+	808, 2688, 596,
+	768, 2692, 628,
+	732, 2696, 664,
+	696, 2696, 696
+};
+
+const uint16_t filter_3tap_64p_183[99] = {
+	2048, 2048, 0,
+	2008, 2060, 20,
+	1968, 2076, 44,
+	1932, 2088, 68,
+	1892, 2104, 92,
+	1856, 2116, 120,
+	1816, 2128, 144,
+	1780, 2140, 168,
+	1744, 2152, 196,
+	1704, 2164, 220,
+	1668, 2176, 248,
+	1632, 2188, 272,
+	1592, 2196, 300,
+	1556, 2204, 328,
+	1520, 2216, 356,
+	1484, 2224, 384,
+	1448, 2232, 412,
+	1412, 2240, 440,
+	1376, 2244, 468,
+	1340, 2252, 496,
+	1304, 2256, 528,
+	1272, 2264, 556,
+	1236, 2268, 584,
+	1200, 2272, 616,
+	1168, 2276, 648,
+	1132, 2280, 676,
+	1100, 2284, 708,
+	1064, 2288, 740,
+	1032, 2288, 772,
+	996, 2292, 800,
+	964, 2292, 832,
+	932, 2292, 868,
+	900, 2292, 900
+};
+
+const uint16_t filter_4tap_64p_upscale[132] = {
+	0, 4096, 0, 0,
+	16344, 4092, 40, 0,
+	16308, 4084, 84, 16380,
+	16272, 4072, 132, 16380,
+	16240, 4056, 180, 16380,
+	16212, 4036, 232, 16376,
+	16184, 4012, 288, 16372,
+	16160, 3984, 344, 16368,
+	16136, 3952, 404, 16364,
+	16116, 3916, 464, 16360,
+	16100, 3872, 528, 16356,
+	16084, 3828, 596, 16348,
+	16072, 3780, 664, 16344,
+	16060, 3728, 732, 16336,
+	16052, 3676, 804, 16328,
+	16044, 3616, 876, 16320,
+	16040, 3556, 952, 16312,
+	16036, 3492, 1028, 16300,
+	16032, 3424, 1108, 16292,
+	16032, 3356, 1188, 16280,
+	16036, 3284, 1268, 16272,
+	16036, 3212, 1352, 16260,
+	16040, 3136, 1436, 16248,
+	16044, 3056, 1520, 16236,
+	16052, 2980, 1604, 16224,
+	16060, 2896, 1688, 16212,
+	16064, 2816, 1776, 16200,
+	16076, 2732, 1864, 16188,
+	16084, 2648, 1952, 16176,
+	16092, 2564, 2040, 16164,
+	16104, 2476, 2128, 16152,
+	16116, 2388, 2216, 16140,
+	16128, 2304, 2304, 16128 };
+
+const uint16_t filter_4tap_64p_117[132] = {
+	420, 3248, 420, 0,
+	380, 3248, 464, 16380,
+	344, 3248, 508, 16372,
+	308, 3248, 552, 16368,
+	272, 3240, 596, 16364,
+	236, 3236, 644, 16356,
+	204, 3224, 692, 16352,
+	172, 3212, 744, 16344,
+	144, 3196, 796, 16340,
+	116, 3180, 848, 16332,
+	88, 3160, 900, 16324,
+	60, 3136, 956, 16320,
+	36, 3112, 1012, 16312,
+	16, 3084, 1068, 16304,
+	16380, 3056, 1124, 16296,
+	16360, 3024, 1184, 16292,
+	16340, 2992, 1244, 16284,
+	16324, 2956, 1304, 16276,
+	16308, 2920, 1364, 16268,
+	16292, 2880, 1424, 16264,
+	16280, 2836, 1484, 16256,
+	16268, 2792, 1548, 16252,
+	16256, 2748, 1608, 16244,
+	16248, 2700, 1668, 16240,
+	16240, 2652, 1732, 16232,
+	16232, 2604, 1792, 16228,
+	16228, 2552, 1856, 16224,
+	16220, 2500, 1916, 16220,
+	16216, 2444, 1980, 16216,
+	16216, 2388, 2040, 16216,
+	16212, 2332, 2100, 16212,
+	16212, 2276, 2160, 16212,
+	16212, 2220, 2220, 16212 };
+
+const uint16_t filter_4tap_64p_150[132] = {
+	696, 2700, 696, 0,
+	660, 2704, 732, 16380,
+	628, 2704, 768, 16376,
+	596, 2704, 804, 16372,
+	564, 2700, 844, 16364,
+	532, 2696, 884, 16360,
+	500, 2692, 924, 16356,
+	472, 2684, 964, 16352,
+	440, 2676, 1004, 16352,
+	412, 2668, 1044, 16348,
+	384, 2656, 1088, 16344,
+	360, 2644, 1128, 16340,
+	332, 2632, 1172, 16336,
+	308, 2616, 1216, 16336,
+	284, 2600, 1260, 16332,
+	260, 2580, 1304, 16332,
+	236, 2560, 1348, 16328,
+	216, 2540, 1392, 16328,
+	196, 2516, 1436, 16328,
+	176, 2492, 1480, 16324,
+	156, 2468, 1524, 16324,
+	136, 2440, 1568, 16328,
+	120, 2412, 1612, 16328,
+	104, 2384, 1656, 16328,
+	88, 2352, 1700, 16332,
+	72, 2324, 1744, 16332,
+	60, 2288, 1788, 16336,
+	48, 2256, 1828, 16340,
+	36, 2220, 1872, 16344,
+	24, 2184, 1912, 16352,
+	12, 2148, 1952, 16356,
+	4, 2112, 1996, 16364,
+	16380, 2072, 2036, 16372 };
+
+const uint16_t filter_4tap_64p_183[132] = {
+	944, 2204, 944, 0,
+	916, 2204, 972, 0,
+	888, 2200, 996, 0,
+	860, 2200, 1024, 4,
+	832, 2196, 1052, 4,
+	808, 2192, 1080, 8,
+	780, 2188, 1108, 12,
+	756, 2180, 1140, 12,
+	728, 2176, 1168, 16,
+	704, 2168, 1196, 20,
+	680, 2160, 1224, 24,
+	656, 2152, 1252, 28,
+	632, 2144, 1280, 36,
+	608, 2132, 1308, 40,
+	584, 2120, 1336, 48,
+	560, 2112, 1364, 52,
+	536, 2096, 1392, 60,
+	516, 2084, 1420, 68,
+	492, 2072, 1448, 76,
+	472, 2056, 1476, 84,
+	452, 2040, 1504, 92,
+	428, 2024, 1532, 100,
+	408, 2008, 1560, 112,
+	392, 1992, 1584, 120,
+	372, 1972, 1612, 132,
+	352, 1956, 1636, 144,
+	336, 1936, 1664, 156,
+	316, 1916, 1688, 168,
+	300, 1896, 1712, 180,
+	284, 1876, 1736, 192,
+	268, 1852, 1760, 208,
+	252, 1832, 1784, 220,
+	236, 1808, 1808, 236 };
+
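+/* Coefficient values appear to be signed 14-bit fixed point with 4096
+ * representing 1.0 (values near 16384 are small two's-complement
+ * negatives). The table is selected by scaling ratio: below 1.0 use the
+ * upscale table, below 4/3 the 1.17 table, below 5/3 the 1.50 table,
+ * otherwise the 1.83 table.
+ */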
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
+{
+	if (ratio.value < dal_fixed31_32_one.value)
+		return filter_3tap_16p_upscale;
+	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+		return filter_3tap_16p_117;
+	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+		return filter_3tap_16p_150;
+	else
+		return filter_3tap_16p_183;
+}
+
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
+{
+	if (ratio.value < dal_fixed31_32_one.value)
+		return filter_3tap_64p_upscale;
+	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+		return filter_3tap_64p_117;
+	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+		return filter_3tap_64p_150;
+	else
+		return filter_3tap_64p_183;
+}
+
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
+{
+	if (ratio.value < dal_fixed31_32_one.value)
+		return filter_4tap_16p_upscale;
+	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+		return filter_4tap_16p_117;
+	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+		return filter_4tap_16p_150;
+	else
+		return filter_4tap_16p_183;
+}
+
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
+{
+	if (ratio.value < dal_fixed31_32_one.value)
+		return filter_4tap_64p_upscale;
+	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+		return filter_4tap_64p_117;
+	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+		return filter_4tap_64p_150;
+	else
+		return filter_4tap_64p_183;
+}

+ 1302 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c

@@ -0,0 +1,1302 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dce_stream_encoder.h"
+#include "reg_helper.h"
+
+enum DP_PIXEL_ENCODING {
+	DP_PIXEL_ENCODING_RGB444                 = 0x00000000,
+	DP_PIXEL_ENCODING_YCBCR422               = 0x00000001,
+	DP_PIXEL_ENCODING_YCBCR444               = 0x00000002,
+	DP_PIXEL_ENCODING_RGB_WIDE_GAMUT         = 0x00000003,
+	DP_PIXEL_ENCODING_Y_ONLY                 = 0x00000004,
+	DP_PIXEL_ENCODING_YCBCR420               = 0x00000005,
+	DP_PIXEL_ENCODING_RESERVED               = 0x00000006,
+};
+
+enum DP_COMPONENT_DEPTH {
+	DP_COMPONENT_DEPTH_6BPC                  = 0x00000000,
+	DP_COMPONENT_DEPTH_8BPC                  = 0x00000001,
+	DP_COMPONENT_DEPTH_10BPC                 = 0x00000002,
+	DP_COMPONENT_DEPTH_12BPC                 = 0x00000003,
+	DP_COMPONENT_DEPTH_16BPC                 = 0x00000004,
+	DP_COMPONENT_DEPTH_RESERVED              = 0x00000005,
+};
+
+#define REG(reg)\
+	(enc110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	enc110->se_shift->field_name, enc110->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define DP_BLANK_MAX_RETRY 20
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+	#define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK       0x00000010L
+	#define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK         0x00000300L
+	#define	TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT     0x00000004
+	#define	TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT       0x00000008
+#endif
+
+enum {
+	DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DCE110_SE(audio)\
+	container_of(audio, struct dce110_stream_encoder, base)
+
+#define CTX \
+	enc110->base.ctx
+
+static void dce110_update_generic_info_packet(
+	struct dce110_stream_encoder *enc110,
+	uint32_t packet_index,
+	const struct encoder_info_packet *info_packet)
+{
+	uint32_t regval;
+	/* TODOFPGA: figure out a proper number for max_retries when
+	 * polling for lock; use 50 for now.
+	 */
+	uint32_t max_retries = 50;
+
+	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+		if (packet_index >= 8)
+			ASSERT(0);
+
+		/* Poll until dig_update_lock is unlocked (ASIC-internal
+		 * signal); assume the OTG master lock will unlock it.
+		 */
+		REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+				1, 10, max_retries);
+
+		/* check if HW reading GSP memory */
+		REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+				1, 10, max_retries);
+
+		/* HW does is not reading GSP memory not reading too long ->
+		 * something wrong. clear GPS memory access and notify?
+		 * hw SW is writing to GSP memory
+		 */
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+	}
+	/* choose which generic packet to use */
+	REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+			AFMT_GENERIC_INDEX, packet_index);
+
+	/* write generic packet header
+	 * (4th byte is for GENERIC0 only) */
+	REG_SET_4(AFMT_GENERIC_HDR, 0,
+			AFMT_GENERIC_HB0, info_packet->hb0,
+			AFMT_GENERIC_HB1, info_packet->hb1,
+			AFMT_GENERIC_HB2, info_packet->hb2,
+			AFMT_GENERIC_HB3, info_packet->hb3);
+
+	/* write generic packet contents
+	 * (we never use last 4 bytes)
+	 * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */
+	{
+		const uint32_t *content =
+			(const uint32_t *) &info_packet->sb[0];
+
+		REG_WRITE(AFMT_GENERIC_0, *content++);
+		REG_WRITE(AFMT_GENERIC_1, *content++);
+		REG_WRITE(AFMT_GENERIC_2, *content++);
+		REG_WRITE(AFMT_GENERIC_3, *content++);
+		REG_WRITE(AFMT_GENERIC_4, *content++);
+		REG_WRITE(AFMT_GENERIC_5, *content++);
+		REG_WRITE(AFMT_GENERIC_6, *content);
+		REG_WRITE(AFMT_GENERIC_7, 0);
+	}
+
+	if (!REG(AFMT_VBI_PACKET_CONTROL1)) {
+		/* force double-buffered packet update */
+		REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL,
+			AFMT_GENERIC0_UPDATE, (packet_index == 0),
+			AFMT_GENERIC2_UPDATE, (packet_index == 2));
+	}
+}
+
+static void dce110_update_hdmi_info_packet(
+	struct dce110_stream_encoder *enc110,
+	uint32_t packet_index,
+	const struct encoder_info_packet *info_packet)
+{
+	struct dc_context *ctx = enc110->base.ctx;
+	uint32_t cont, send, line;
+
+	if (info_packet->valid) {
+		dce110_update_generic_info_packet(
+			enc110,
+			packet_index,
+			info_packet);
+
+		/* enable transmission of packet(s) -
+		 * packet transmission begins on the next frame */
+		cont = 1;
+		/* send packet(s) every frame */
+		send = 1;
+		/* select line number to send packets on */
+		line = 2;
+	} else {
+		cont = 0;
+		send = 0;
+		line = 0;
+	}
+
+	/* choose which generic packet control to use */
+	switch (packet_index) {
+	case 0:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+				HDMI_GENERIC0_CONT, cont,
+				HDMI_GENERIC0_SEND, send,
+				HDMI_GENERIC0_LINE, line);
+		break;
+	case 1:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+				HDMI_GENERIC1_CONT, cont,
+				HDMI_GENERIC1_SEND, send,
+				HDMI_GENERIC1_LINE, line);
+		break;
+	case 2:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+				HDMI_GENERIC0_CONT, cont,
+				HDMI_GENERIC0_SEND, send,
+				HDMI_GENERIC0_LINE, line);
+		break;
+	case 3:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+				HDMI_GENERIC1_CONT, cont,
+				HDMI_GENERIC1_SEND, send,
+				HDMI_GENERIC1_LINE, line);
+		break;
+	default:
+		/* invalid HW packet index */
+		dm_logger_write(
+			ctx->logger, LOG_WARNING,
+			"Invalid HW packet index: %s()\n",
+			__func__);
+		return;
+	}
+}
+
+/* setup stream encoder in dp mode */
+static void dce110_stream_encoder_dp_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	enum dc_color_space output_color_space)
+{
+
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	/* set pixel encoding */
+	switch (crtc_timing->pixel_encoding) {
+	case PIXEL_ENCODING_YCBCR422:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_YCBCR422);
+		break;
+	case PIXEL_ENCODING_YCBCR444:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_YCBCR444);
+
+		if (crtc_timing->flags.Y_ONLY)
+			if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+				/* HW testing only, no use case yet.
+				 * Color depth of Y-only could be
+				 * 8, 10, 12, 16 bits */
+				REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+						DP_PIXEL_ENCODING_Y_ONLY);
+		/* Note: DP_MSA_MISC1 bit 7 is the indicator
+		 * of Y-only mode.
+		 * This bit is set in HW if register
+		 * DP_PIXEL_ENCODING is programmed to 0x4 */
+		break;
+	case PIXEL_ENCODING_YCBCR420:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_YCBCR420);
+		if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
+			REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
+
+		break;
+	default:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_RGB444);
+		break;
+	}
+
+	/* set color depth */
+
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_6BPC);
+		break;
+	case COLOR_DEPTH_888:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_8BPC);
+		break;
+	case COLOR_DEPTH_101010:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_10BPC);
+
+		break;
+	case COLOR_DEPTH_121212:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_12BPC);
+		break;
+	default:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_6BPC);
+		break;
+	}
+
+	/* set dynamic range and YCbCr range */
+	if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
+		REG_UPDATE_2(
+			DP_PIXEL_FORMAT,
+			DP_DYN_RANGE, 0,
+			DP_YCBCR_RANGE, 0);
+
+}
+
+static void dce110_stream_encoder_set_stream_attribute_helper(
+		struct dce110_stream_encoder *enc110,
+		struct dc_crtc_timing *crtc_timing)
+{
+	if (enc110->regs->TMDS_CNTL) {
+		switch (crtc_timing->pixel_encoding) {
+		case PIXEL_ENCODING_YCBCR422:
+			REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 1);
+			break;
+		default:
+			REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 0);
+			break;
+		}
+		REG_UPDATE(TMDS_CNTL, TMDS_COLOR_FORMAT, 0);
+	} else if (enc110->regs->DIG_FE_CNTL) {
+		switch (crtc_timing->pixel_encoding) {
+		case PIXEL_ENCODING_YCBCR422:
+			REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
+			break;
+		default:
+			REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
+			break;
+		}
+		REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
+	}
+
+}
+
+/* setup stream encoder in hdmi mode */
+static void dce110_stream_encoder_hdmi_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	int actual_pix_clk_khz,
+	bool enable_audio)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	struct bp_encoder_control cntl = {0};
+
+	cntl.action = ENCODER_CONTROL_SETUP;
+	cntl.engine_id = enc110->base.id;
+	cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+	cntl.enable_dp_audio = enable_audio;
+	cntl.pixel_clock = actual_pix_clk_khz;
+	cntl.lanes_number = LANE_COUNT_FOUR;
+
+	if (enc110->base.bp->funcs->encoder_control(
+			enc110->base.bp, &cntl) != BP_RESULT_OK)
+		return;
+
+	dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+
+	/* setup HDMI engine */
+	if (!enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+		REG_UPDATE_3(HDMI_CONTROL,
+			HDMI_PACKET_GEN_VERSION, 1,
+			HDMI_KEEPOUT_MODE, 1,
+			HDMI_DEEP_COLOR_ENABLE, 0);
+	} else if (enc110->regs->DIG_FE_CNTL) {
+		REG_UPDATE_5(HDMI_CONTROL,
+			HDMI_PACKET_GEN_VERSION, 1,
+			HDMI_KEEPOUT_MODE, 1,
+			HDMI_DEEP_COLOR_ENABLE, 0,
+			HDMI_DATA_SCRAMBLE_EN, 0,
+			HDMI_CLOCK_CHANNEL_RATE, 0);
+	}
+
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_888:
+		REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+		break;
+	case COLOR_DEPTH_101010:
+		REG_UPDATE_2(HDMI_CONTROL,
+			HDMI_DEEP_COLOR_DEPTH, 1,
+			HDMI_DEEP_COLOR_ENABLE, 1);
+		break;
+	case COLOR_DEPTH_121212:
+		REG_UPDATE_2(HDMI_CONTROL,
+			HDMI_DEEP_COLOR_DEPTH, 2,
+			HDMI_DEEP_COLOR_ENABLE, 1);
+		break;
+	case COLOR_DEPTH_161616:
+		REG_UPDATE_2(HDMI_CONTROL,
+			HDMI_DEEP_COLOR_DEPTH, 3,
+			HDMI_DEEP_COLOR_ENABLE, 1);
+		break;
+	default:
+		break;
+	}
+
+	if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+		if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+			/* enable HDMI data scrambler
+			 * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+			 * Clock channel frequency is 1/4 of character rate.
+			 */
+			REG_UPDATE_2(HDMI_CONTROL,
+				HDMI_DATA_SCRAMBLE_EN, 1,
+				HDMI_CLOCK_CHANNEL_RATE, 1);
+		} else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+			/* TODO: New feature for DCE11, still need to implement */
+
+			/* enable HDMI data scrambler
+			 * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+			 * Clock channel frequency is the same
+			 * as character rate
+			 */
+			REG_UPDATE_2(HDMI_CONTROL,
+				HDMI_DATA_SCRAMBLE_EN, 1,
+				HDMI_CLOCK_CHANNEL_RATE, 0);
+		}
+	}
+
+	REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+		HDMI_GC_CONT, 1,
+		HDMI_GC_SEND, 1,
+		HDMI_NULL_SEND, 1);
+
+	/* following belongs to audio */
+	REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+	REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+	REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+				VBI_LINE_0 + 2);
+
+	REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+
+}
+
+/* setup stream encoder in dvi mode */
+static void dce110_stream_encoder_dvi_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	bool is_dual_link)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	struct bp_encoder_control cntl = {0};
+
+	cntl.action = ENCODER_CONTROL_SETUP;
+	cntl.engine_id = enc110->base.id;
+	cntl.signal = is_dual_link ?
+			SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+	cntl.enable_dp_audio = false;
+	cntl.pixel_clock = crtc_timing->pix_clk_khz;
+	cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+	if (enc110->base.bp->funcs->encoder_control(
+			enc110->base.bp, &cntl) != BP_RESULT_OK)
+		return;
+
+	ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+	ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+	dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+}
+
+static void dce110_stream_encoder_set_mst_bandwidth(
+	struct stream_encoder *enc,
+	struct fixed31_32 avg_time_slots_per_mtp)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	uint32_t x = dal_fixed31_32_floor(
+		avg_time_slots_per_mtp);
+	uint32_t y = dal_fixed31_32_ceil(
+		dal_fixed31_32_shl(
+			dal_fixed31_32_sub_int(
+				avg_time_slots_per_mtp,
+				x),
+			26));
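+
+	/* Worked example (assumed value): avg_time_slots_per_mtp = 3.5 gives
+	 * x = 3 and y = ceil(0.5 * (1 << 26)) = 0x2000000, i.e. the fractional
+	 * part expressed in 26-bit fixed point.
+	 */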
+
+	REG_SET_2(DP_MSE_RATE_CNTL, 0,
+			DP_MSE_RATE_X, x,
+			DP_MSE_RATE_Y, y);
+
+	/* wait for update to be completed on the link, i.e. the (read-only)
+	 * DP_MSE_RATE_UPDATE_PENDING field is reset to 0 (not pending)
+	 */
+	REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
+			0,
+			10, DP_MST_UPDATE_MAX_RETRY);
+}
+
+static void dce110_stream_encoder_update_hdmi_info_packets(
+	struct stream_encoder *enc,
+	const struct encoder_info_frame *info_frame)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+			enc110->se_mask->HDMI_AVI_INFO_SEND) {
+
+		if (info_frame->avi.valid) {
+			const uint32_t *content =
+				(const uint32_t *) &info_frame->avi.sb[0];
+
+			REG_WRITE(AFMT_AVI_INFO0, content[0]);
+
+			REG_WRITE(AFMT_AVI_INFO1, content[1]);
+
+			REG_WRITE(AFMT_AVI_INFO2, content[2]);
+
+			REG_WRITE(AFMT_AVI_INFO3, content[3]);
+
+			REG_UPDATE(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION,
+						info_frame->avi.hb1);
+
+			REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+					HDMI_AVI_INFO_SEND, 1,
+					HDMI_AVI_INFO_CONT, 1);
+
+			REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE,
+							VBI_LINE_0 + 2);
+
+		} else {
+			REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+				HDMI_AVI_INFO_SEND, 0,
+				HDMI_AVI_INFO_CONT, 0);
+		}
+	}
+
+	if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+			enc110->se_mask->HDMI_AVI_INFO_SEND) {
+		dce110_update_hdmi_info_packet(enc110, 0, &info_frame->vendor);
+		dce110_update_hdmi_info_packet(enc110, 1, &info_frame->gamut);
+		dce110_update_hdmi_info_packet(enc110, 2, &info_frame->spd);
+	}
+
+}
+
+static void dce110_stream_encoder_stop_hdmi_info_packets(
+	struct stream_encoder *enc)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	/* stop generic packets 0 & 1 on HDMI */
+	REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
+		HDMI_GENERIC1_CONT, 0,
+		HDMI_GENERIC1_LINE, 0,
+		HDMI_GENERIC1_SEND, 0,
+		HDMI_GENERIC0_CONT, 0,
+		HDMI_GENERIC0_LINE, 0,
+		HDMI_GENERIC0_SEND, 0);
+
+	/* stop generic packets 2 & 3 on HDMI */
+	REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
+		HDMI_GENERIC0_CONT, 0,
+		HDMI_GENERIC0_LINE, 0,
+		HDMI_GENERIC0_SEND, 0,
+		HDMI_GENERIC1_CONT, 0,
+		HDMI_GENERIC1_LINE, 0,
+		HDMI_GENERIC1_SEND, 0);
+
+}
+
+static void dce110_stream_encoder_update_dp_info_packets(
+	struct stream_encoder *enc,
+	const struct encoder_info_frame *info_frame)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	uint32_t value = REG_READ(DP_SEC_CNTL);
+
+	if (info_frame->vsc.valid)
+		dce110_update_generic_info_packet(
+			enc110,
+			0,  /* packetIndex */
+			&info_frame->vsc);
+
+	/* enable/disable transmission of packet(s).
+	 * If enabled, packet transmission begins on the next frame
+	 */
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+
+	/* This bit is the master enable bit.
+	 * When enabling the secondary stream engine,
+	 * this master bit must also be set.
+	 * This register is shared with the audio info frame.
+	 * Therefore we need to enable the master bit
+	 * if at least one of the fields is not 0.
+	 */
+	if (value)
+		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_stream_encoder_stop_dp_info_packets(
+	struct stream_encoder *enc)
+{
+	/* stop generic packets on DP */
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	uint32_t value = REG_READ(DP_SEC_CNTL);
+
+	if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
+		REG_SET_7(DP_SEC_CNTL, 0,
+			DP_SEC_GSP0_ENABLE, 0,
+			DP_SEC_GSP1_ENABLE, 0,
+			DP_SEC_GSP2_ENABLE, 0,
+			DP_SEC_GSP3_ENABLE, 0,
+			DP_SEC_AVI_ENABLE, 0,
+			DP_SEC_MPG_ENABLE, 0,
+			DP_SEC_STREAM_ENABLE, 0);
+	}
+
+	/* This register is shared with the audio info frame.
+	 * Therefore we need to keep the master enabled
+	 * if at least one of the fields is not 0 */
+
+	if (value)
+		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+static void dce110_stream_encoder_dp_blank(
+	struct stream_encoder *enc)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	uint32_t max_retries;
+
+	/* Note: For CZ, we are changing driver default to disable
+	 * stream deferred to next VBLANK. If results are positive, we
+	 * will make the same change to all DCE versions. There are a
+	 * handful of panels that cannot handle disable stream at
+	 * HBLANK and will result in a white line flash across the
+	 * screen on stream disable. */
+
+	/* Specify the video stream disable point
+	 * (2 = start of the next vertical blank) */
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+	/* Larger delay to wait until VBLANK - use max retry of
+	 * 10us * 3000 = 30ms. This covers 16.6ms of a typical 60 Hz mode
+	 * plus a little more, because we may not trust the delay accuracy.
+	 */
+	max_retries = DP_BLANK_MAX_RETRY * 150;
+
+	/* disable DP stream */
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+	/* the encoder stops sending the video stream
+	 * at the start of the vertical blanking.
+	 * Poll for DP_VID_STREAM_STATUS == 0
+	 */
+	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
+			0,
+			10, max_retries);
+
+	/* Tell the DP encoder to ignore timing from CRTC, must be done after
+	 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+	 * complete, stream status will be stuck in video stream enabled state,
+	 * i.e. DP_VID_STREAM_STATUS stuck at 1.
+	 */
+
+	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+}
+
+/* output video stream to link encoder */
+static void dce110_stream_encoder_dp_unblank(
+	struct stream_encoder *enc,
+	const struct encoder_unblank_param *param)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+		uint32_t n_vid = 0x8000;
+		uint32_t m_vid;
+
+		/* M / N = Fstream / Flink
+		 * m_vid / n_vid = pixel rate / link rate
+		 */
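+
+		/* Example with assumed numbers: a 148.5 MHz pixel clock over a
+		 * link with a 270 MHz symbol clock gives
+		 * m_vid = 0x8000 * 148500 / 270000 ~= 18022.
+		 */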
+
+		uint64_t m_vid_l = n_vid;
+
+		m_vid_l *= param->crtc_timing.pixel_clock;
+		m_vid_l = div_u64(m_vid_l,
+			param->link_settings.link_rate
+				* LINK_RATE_REF_FREQ_IN_KHZ);
+
+		m_vid = (uint32_t) m_vid_l;
+
+		/* disable auto M/N generation while programming
+		 * the initial Mvid/Nvid values below */
+		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+		/* auto measurement needs 1 full 0x8000 symbol cycle to kick in,
+		 * therefore program initial values for Mvid and Nvid
+		 */
+
+		REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+		REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+	}
+
+	/* set DIG_START to 0x1 to resync FIFO */
+
+	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+
+	/* switch DP encoder to CRTC data */
+
+	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+	/* wait 100us for DIG/DP logic to prime
+	 * (i.e. a few video lines)
+	 */
+	udelay(100);
+
+	/* the hardware would start sending video at the start of the next DP
+	 * frame (i.e. rising edge of the vblank).
+	 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+	 * register has no effect on enable transition! HW always guarantees
+	 * VID_STREAM enable at start of next frame, and this is not
+	 * programmable
+	 */
+
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+}
+
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+#include "include/audio_types.h"
+
+/**
+* speakers_to_channels
+*
+* @brief
+*  translate speakers to channels
+*
+*  FL  - Front Left
+*  FR  - Front Right
+*  RL  - Rear Left
+*  RR  - Rear Right
+*  RC  - Rear Center
+*  FC  - Front Center
+*  FLC - Front Left Center
+*  FRC - Front Right Center
+*  RLC - Rear Left Center
+*  RRC - Rear Right Center
+*  LFE - Low Freq Effect
+*
+*               FC
+*          FLC      FRC
+*    FL                    FR
+*
+*                    LFE
+*              ()
+*
+*
+*    RL                    RR
+*          RLC      RRC
+*               RC
+*
+*             ch  8   7   6   5   4   3   2   1
+* 0b00000011      -   -   -   -   -   -   FR  FL
+* 0b00000111      -   -   -   -   -   LFE FR  FL
+* 0b00001011      -   -   -   -   FC  -   FR  FL
+* 0b00001111      -   -   -   -   FC  LFE FR  FL
+* 0b00010011      -   -   -   RC  -   -   FR  FL
+* 0b00010111      -   -   -   RC  -   LFE FR  FL
+* 0b00011011      -   -   -   RC  FC  -   FR  FL
+* 0b00011111      -   -   -   RC  FC  LFE FR  FL
+* 0b00110011      -   -   RR  RL  -   -   FR  FL
+* 0b00110111      -   -   RR  RL  -   LFE FR  FL
+* 0b00111011      -   -   RR  RL  FC  -   FR  FL
+* 0b00111111      -   -   RR  RL  FC  LFE FR  FL
+* 0b01110011      -   RC  RR  RL  -   -   FR  FL
+* 0b01110111      -   RC  RR  RL  -   LFE FR  FL
+* 0b01111011      -   RC  RR  RL  FC  -   FR  FL
+* 0b01111111      -   RC  RR  RL  FC  LFE FR  FL
+* 0b11110011      RRC RLC RR  RL  -   -   FR  FL
+* 0b11110111      RRC RLC RR  RL  -   LFE FR  FL
+* 0b11111011      RRC RLC RR  RL  FC  -   FR  FL
+* 0b11111111      RRC RLC RR  RL  FC  LFE FR  FL
+* 0b11000011      FRC FLC -   -   -   -   FR  FL
+* 0b11000111      FRC FLC -   -   -   LFE FR  FL
+* 0b11001011      FRC FLC -   -   FC  -   FR  FL
+* 0b11001111      FRC FLC -   -   FC  LFE FR  FL
+* 0b11010011      FRC FLC -   RC  -   -   FR  FL
+* 0b11010111      FRC FLC -   RC  -   LFE FR  FL
+* 0b11011011      FRC FLC -   RC  FC  -   FR  FL
+* 0b11011111      FRC FLC -   RC  FC  LFE FR  FL
+* 0b11110011      FRC FLC RR  RL  -   -   FR  FL
+* 0b11110111      FRC FLC RR  RL  -   LFE FR  FL
+* 0b11111011      FRC FLC RR  RL  FC  -   FR  FL
+* 0b11111111      FRC FLC RR  RL  FC  LFE FR  FL
+*
+* @param
+*  speakers - speaker information as it comes from CEA audio block
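+*
+*  Example (from the table above): a stereo-only sink (FL_FR = 1, all other
+*  flags 0) maps to channels.all = 0b00000011 (FR | FL).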
+*/
+
+union audio_cea_channels {
+	uint8_t all;
+	struct audio_cea_channels_bits {
+		uint32_t FL:1;
+		uint32_t FR:1;
+		uint32_t LFE:1;
+		uint32_t FC:1;
+		uint32_t RL_RC:1;
+		uint32_t RR:1;
+		uint32_t RC_RLC_FLC:1;
+		uint32_t RRC_FRC:1;
+	} channels;
+};
+
+struct audio_clock_info {
+	/* pixel clock frequency */
+	uint32_t pixel_clock_in_10khz;
+	/* N - 32 kHz audio */
+	uint32_t n_32khz;
+	/* CTS - 32 kHz audio */
+	uint32_t cts_32khz;
+	/* N/CTS - 44.1 kHz audio */
+	uint32_t n_44khz;
+	uint32_t cts_44khz;
+	/* N/CTS - 48 kHz audio */
+	uint32_t n_48khz;
+	uint32_t cts_48khz;
+};
+
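+/* HDMI Audio Clock Regeneration (ACR): the sink reconstructs the audio clock
+ * from 128 * fs = f_TMDS * N / CTS. Each row below stores N/CTS pairs for the
+ * 32 kHz, 44.1 kHz and 48 kHz families at one pixel clock; e.g. at 27 MHz and
+ * 32 kHz: 27000000 * 4096 / 27000 = 4096000 = 128 * 32000.
+ */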
+static const struct audio_clock_info audio_clock_info_table[12] = {
+	{2517, 4576, 28125, 7007, 31250, 6864, 28125},       /* 25.2MHz/1.001 */
+	{2518, 4576, 28125, 7007, 31250, 6864, 28125},       /* 25.2MHz/1.001 */
+	{2520, 4096, 25200, 6272, 28000, 6144, 25200},       /* 25.2MHz */
+	{2700, 4096, 27000, 6272, 30000, 6144, 27000},       /* 27MHz */
+	{2702, 4096, 27027, 6272, 30030, 6144, 27027},       /* 27MHz*1.001 */
+	{2703, 4096, 27027, 6272, 30030, 6144, 27027},       /* 27MHz*1.001 */
+	{5400, 4096, 54000, 6272, 60000, 6144, 54000},       /* 54MHz */
+	{5405, 4096, 54054, 6272, 60060, 6144, 54054},       /* 54MHz*1.001 */
+	{7417, 11648, 210937, 17836, 234375, 11648, 140625}, /* 74.25MHz/1.001 */
+	{7425, 4096, 74250, 6272, 82500, 6144, 74250},       /* 74.25MHz */
+	{14835, 11648, 421875, 8918, 234375, 5824, 140625},  /* 148.5MHz/1.001 */
+	{14850, 4096, 148500, 6272, 165000, 6144, 148500}    /* 148.5MHz */
+};
+
+static const struct audio_clock_info audio_clock_info_table_36bpc[12] = {
+	{2517, 9152, 84375, 7007, 46875, 9152, 56250},
+	{2518, 9152, 84375, 7007, 46875, 9152, 56250},
+	{2520, 4096, 37800, 6272, 42000, 6144, 37800},
+	{2700, 4096, 40500, 6272, 45000, 6144, 40500},
+	{2702, 8192, 81081, 6272, 45045, 8192, 54054},
+	{2703, 8192, 81081, 6272, 45045, 8192, 54054},
+	{5400, 4096, 81000, 6272, 90000, 6144, 81000},
+	{5405, 4096, 81081, 6272, 90090, 6144, 81081},
+	{7417, 11648, 316406, 17836, 351562, 11648, 210937},
+	{7425, 4096, 111375, 6272, 123750, 6144, 111375},
+	{14835, 11648, 632812, 17836, 703125, 11648, 421875},
+	{14850, 4096, 222750, 6272, 247500, 6144, 222750}
+};
+
+static const struct audio_clock_info audio_clock_info_table_48bpc[12] = {
+	{2517, 4576, 56250, 7007, 62500, 6864, 56250},
+	{2518, 4576, 56250, 7007, 62500, 6864, 56250},
+	{2520, 4096, 50400, 6272, 56000, 6144, 50400},
+	{2700, 4096, 54000, 6272, 60000, 6144, 54000},
+	{2702, 4096, 54054, 6272, 60060, 8192, 54054},
+	{2703, 4096, 54054, 6272, 60060, 8192, 54054},
+	{5400, 4096, 108000, 6272, 120000, 6144, 108000},
+	{5405, 4096, 108108, 6272, 120120, 6144, 108108},
+	{7417, 11648, 421875, 17836, 468750, 11648, 281250},
+	{7425, 4096, 148500, 6272, 165000, 6144, 148500},
+	{14835, 11648, 843750, 8918, 468750, 11648, 281250},
+	{14850, 4096, 297000, 6272, 330000, 6144, 297000}
+};
+
+union audio_cea_channels speakers_to_channels(
+	struct audio_speaker_flags speaker_flags)
+{
+	union audio_cea_channels cea_channels = {0};
+
+	/* these are one to one */
+	cea_channels.channels.FL = speaker_flags.FL_FR;
+	cea_channels.channels.FR = speaker_flags.FL_FR;
+	cea_channels.channels.LFE = speaker_flags.LFE;
+	cea_channels.channels.FC = speaker_flags.FC;
+
+	/* if Rear Left and Right exist move RC speaker to channel 7
+	 * otherwise to channel 5
+	 */
+	if (speaker_flags.RL_RR) {
+		cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+		cea_channels.channels.RR = speaker_flags.RL_RR;
+		cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+	} else {
+		cea_channels.channels.RL_RC = speaker_flags.RC;
+	}
+
+	/* FRONT Left Right Center and REAR Left Right Center are exclusive */
+	if (speaker_flags.FLC_FRC) {
+		cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+		cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+	} else {
+		cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+		cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+	}
+
+	return cea_channels;
+}
+
+uint32_t calc_max_audio_packets_per_line(
+	const struct audio_crtc_info *crtc_info)
+{
+	uint32_t max_packets_per_line;
+
+	max_packets_per_line =
+		crtc_info->h_total - crtc_info->h_active;
+
+	if (crtc_info->pixel_repetition)
+		max_packets_per_line *= crtc_info->pixel_repetition;
+
+	/* for other hdmi features */
+	max_packets_per_line -= 58;
+	/* for Control Period */
+	max_packets_per_line -= 16;
+	/* Number of Audio Packets per Line */
+	max_packets_per_line /= 32;
+
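+	/* Worked example with assumed 1080p timing: h_total 2200, h_active
+	 * 1920 -> hblank 280; 280 - 58 - 16 = 206; 206 / 32 = 6 audio packets
+	 * per line.
+	 */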
+	return max_packets_per_line;
+}
+
+bool get_audio_clock_info(
+	enum dc_color_depth color_depth,
+	uint32_t crtc_pixel_clock_in_khz,
+	uint32_t actual_pixel_clock_in_khz,
+	struct audio_clock_info *audio_clock_info)
+{
+	const struct audio_clock_info *clock_info;
+	uint32_t index;
+	uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+	uint32_t audio_array_size;
+
+	if (audio_clock_info == NULL)
+		return false; /* should not happen */
+
+	switch (color_depth) {
+	case COLOR_DEPTH_161616:
+		clock_info = audio_clock_info_table_48bpc;
+		audio_array_size = ARRAY_SIZE(
+				audio_clock_info_table_48bpc);
+		break;
+	case COLOR_DEPTH_121212:
+		clock_info = audio_clock_info_table_36bpc;
+		audio_array_size = ARRAY_SIZE(
+				audio_clock_info_table_36bpc);
+		break;
+	default:
+		clock_info = audio_clock_info_table;
+		audio_array_size = ARRAY_SIZE(
+				audio_clock_info_table);
+		break;
+	}
+
+	if (clock_info != NULL) {
+		/* search for exact pixel clock in table */
+		for (index = 0; index < audio_array_size; index++) {
+			if (clock_info[index].pixel_clock_in_10khz >
+				crtc_pixel_clock_in_10khz)
+				break;  /* no match */
+			else if (clock_info[index].pixel_clock_in_10khz ==
+					crtc_pixel_clock_in_10khz) {
+				/* match found */
+				*audio_clock_info = clock_info[index];
+				return true;
+			}
+		}
+	}
+
+	/* not found */
+	if (actual_pixel_clock_in_khz == 0)
+		actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+
+	/* See the HDMI spec: the table entry under
+	 * pixel clock of "Other". */
+	audio_clock_info->pixel_clock_in_10khz =
+			actual_pixel_clock_in_khz / 10;
+	audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
+	audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
+	audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+
+	audio_clock_info->n_32khz = 4096;
+	audio_clock_info->n_44khz = 6272;
+	audio_clock_info->n_48khz = 6144;
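+
+	/* Sanity check: these defaults preserve 128 * fs = f_TMDS * N / CTS,
+	 * e.g. for 32 kHz: (CTS_kHz * 1000) * 4096 / CTS_kHz = 4096000
+	 * = 128 * 32000, since CTS is programmed as the pixel clock in kHz.
+	 */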
+
+	return true;
+}
+
+static void dce110_se_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *audio_info)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	uint32_t channels = 0;
+
+	ASSERT(audio_info);
+	if (audio_info == NULL)
+		/* This should not happen, but it does; bail out so we
+		 * don't crash */
+		return;
+
+	channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+	/* setup the audio stream source select (audio -> dig mapping) */
+	REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+	/* Channel allocation */
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+}
+
+static void dce110_se_setup_hdmi_audio(
+	struct stream_encoder *enc,
+	const struct audio_crtc_info *crtc_info)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	struct audio_clock_info audio_clock_info = {0};
+	uint32_t max_packets_per_line;
+
+	/* For now still do the calculation, although this field is ignored
+	 * when HDMI_PACKET_GEN_VERSION is set to 1 above */
+	max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+	/* HDMI_AUDIO_PACKET_CONTROL */
+	REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+			HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+			HDMI_AUDIO_DELAY_EN, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL */
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL2 */
+	REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+			AFMT_AUDIO_LAYOUT_OVRD, 0,
+			AFMT_60958_OSF_OVRD, 0);
+
+	/* HDMI_ACR_PACKET_CONTROL */
+	REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+			HDMI_ACR_AUTO_SEND, 1,
+			HDMI_ACR_SOURCE, 0,
+			HDMI_ACR_AUDIO_PRIORITY, 0);
+
+	/* Program audio clock sample/regeneration parameters */
+	if (get_audio_clock_info(
+		crtc_info->color_depth,
+		crtc_info->requested_pixel_clock,
+		crtc_info->calculated_pixel_clock,
+		&audio_clock_info)) {
+
+		/* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+		REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+		/* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+		REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+		/* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+		REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+		/* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+		REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+		/* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+		REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+		/* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+		REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+		/* The video driver cannot know in advance which sample rate
+		 * will be used by the HD Audio driver; the
+		 * HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+		 * programmed later, in the interrupt callback */
+	}
+
+	/* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+	 * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+	REG_UPDATE_2(AFMT_60958_0,
+			AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+			AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+	REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+	/* AFMT_60958_2: keep these settings until the
+	 * programming guide comes out */
+	REG_UPDATE_6(AFMT_60958_2,
+			AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+			AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+			AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+			AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+			AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+			AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static void dce110_se_setup_dp_audio(
+	struct stream_encoder *enc)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	/* --- DP Audio packet configurations --- */
+
+	/* ATP Configuration */
+	REG_SET(DP_SEC_AUD_N, 0,
+			DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+	/* Async/auto-calc timestamp mode */
+	REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+			DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+	/* --- The following registers are
+	 * copied from the HDMI setup path (dce110_se_setup_hdmi_audio) --- */
+
+	/* AFMT_AUDIO_PACKET_CONTROL */
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL2 */
+	/* Program the ATP and AIP next */
+	REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+			AFMT_AUDIO_LAYOUT_OVRD, 0,
+			AFMT_60958_OSF_OVRD, 0);
+
+	/* AFMT_INFOFRAME_CONTROL0 */
+	REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+	/* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+	REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static void dce110_se_enable_audio_clock(
+	struct stream_encoder *enc,
+	bool enable)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	if (REG(AFMT_CNTL) == 0)
+		return;   /* DCE8/10 does not have this register */
+
+	REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
+
+	/* wait for AFMT clock to turn on,
+	 * expectation: this should complete in 1-2 reads
+	 *
+	 * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
+	 *
+	 * TODO: waiting for clock_on does not work well. It may need a HW
+	 * programming sequence. But audio seems to work normally even without
+	 * waiting for the clock_on status to change.
+	 */
+}
+
+static void dce110_se_enable_dp_audio(
+	struct stream_encoder *enc)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	/* Enable Audio packets */
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+
+	/* Program the ATP and AIP next */
+	REG_UPDATE_2(DP_SEC_CNTL,
+			DP_SEC_ATP_ENABLE, 1,
+			DP_SEC_AIP_ENABLE, 1);
+
+	/* Program STREAM_ENABLE after all the other enables. */
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_se_disable_dp_audio(
+	struct stream_encoder *enc)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+	uint32_t value = REG_READ(DP_SEC_CNTL);
+
+	/* Disable Audio packets */
+	REG_UPDATE_5(DP_SEC_CNTL,
+			DP_SEC_ASP_ENABLE, 0,
+			DP_SEC_ATP_ENABLE, 0,
+			DP_SEC_AIP_ENABLE, 0,
+			DP_SEC_ACM_ENABLE, 0,
+			DP_SEC_STREAM_ENABLE, 0);
+
+	/* This register is shared with the encoder info frame. Therefore we
+	 * need to keep the master enabled if at least one of the fields is
+	 * not 0 */
+	if (value != 0)
+		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+void dce110_se_audio_mute_control(
+	struct stream_encoder *enc,
+	bool mute)
+{
+	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+void dce110_se_dp_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info)
+{
+	dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_dp_audio_enable(
+	struct stream_encoder *enc)
+{
+	dce110_se_enable_audio_clock(enc, true);
+	dce110_se_setup_dp_audio(enc);
+	dce110_se_enable_dp_audio(enc);
+}
+
+void dce110_se_dp_audio_disable(
+	struct stream_encoder *enc)
+{
+	dce110_se_disable_dp_audio(enc);
+	dce110_se_enable_audio_clock(enc, false);
+}
+
+void dce110_se_hdmi_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info,
+	struct audio_crtc_info *audio_crtc_info)
+{
+	dce110_se_enable_audio_clock(enc, true);
+	dce110_se_setup_hdmi_audio(enc, audio_crtc_info);
+	dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_hdmi_audio_disable(
+	struct stream_encoder *enc)
+{
+	dce110_se_enable_audio_clock(enc, false);
+}
+
+static const struct stream_encoder_funcs dce110_str_enc_funcs = {
+	.dp_set_stream_attribute =
+		dce110_stream_encoder_dp_set_stream_attribute,
+	.hdmi_set_stream_attribute =
+		dce110_stream_encoder_hdmi_set_stream_attribute,
+	.dvi_set_stream_attribute =
+		dce110_stream_encoder_dvi_set_stream_attribute,
+	.set_mst_bandwidth =
+		dce110_stream_encoder_set_mst_bandwidth,
+	.update_hdmi_info_packets =
+		dce110_stream_encoder_update_hdmi_info_packets,
+	.stop_hdmi_info_packets =
+		dce110_stream_encoder_stop_hdmi_info_packets,
+	.update_dp_info_packets =
+		dce110_stream_encoder_update_dp_info_packets,
+	.stop_dp_info_packets =
+		dce110_stream_encoder_stop_dp_info_packets,
+	.dp_blank =
+		dce110_stream_encoder_dp_blank,
+	.dp_unblank =
+		dce110_stream_encoder_dp_unblank,
+
+	.audio_mute_control = dce110_se_audio_mute_control,
+
+	.dp_audio_setup = dce110_se_dp_audio_setup,
+	.dp_audio_enable = dce110_se_dp_audio_enable,
+	.dp_audio_disable = dce110_se_dp_audio_disable,
+
+	.hdmi_audio_setup = dce110_se_hdmi_audio_setup,
+	.hdmi_audio_disable = dce110_se_hdmi_audio_disable,
+};
+
+bool dce110_stream_encoder_construct(
+	struct dce110_stream_encoder *enc110,
+	struct dc_context *ctx,
+	struct dc_bios *bp,
+	enum engine_id eng_id,
+	const struct dce110_stream_enc_registers *regs,
+	const struct dce_stream_encoder_shift *se_shift,
+	const struct dce_stream_encoder_mask *se_mask)
+{
+	if (!enc110)
+		return false;
+	if (!bp)
+		return false;
+
+	enc110->base.funcs = &dce110_str_enc_funcs;
+	enc110->base.ctx = ctx;
+	enc110->base.id = eng_id;
+	enc110->base.bp = bp;
+	enc110->regs = regs;
+	enc110->se_shift = se_shift;
+	enc110->se_mask = se_mask;
+
+	return true;
+}

+ 564 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h

@@ -0,0 +1,564 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCE110_H__
+#define __DC_STREAM_ENCODER_DCE110_H__
+
+#include "stream_encoder.h"
+
+#define DCE110STRENC_FROM_STRENC(stream_encoder)\
+	container_of(stream_encoder, struct dce110_stream_encoder, base)
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+	#define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK       0x00000010L
+	#define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK         0x00000300L
+	#define	TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT     0x00000004
+	#define	TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT       0x00000008
+#endif
+
+
+#define SE_COMMON_REG_LIST_DCE_BASE(id) \
+	SE_COMMON_REG_LIST_BASE(id),\
+	SRI(AFMT_AVI_INFO0, DIG, id), \
+	SRI(AFMT_AVI_INFO1, DIG, id), \
+	SRI(AFMT_AVI_INFO2, DIG, id), \
+	SRI(AFMT_AVI_INFO3, DIG, id)
+
+#define SE_COMMON_REG_LIST_BASE(id) \
+	SRI(AFMT_GENERIC_0, DIG, id), \
+	SRI(AFMT_GENERIC_1, DIG, id), \
+	SRI(AFMT_GENERIC_2, DIG, id), \
+	SRI(AFMT_GENERIC_3, DIG, id), \
+	SRI(AFMT_GENERIC_4, DIG, id), \
+	SRI(AFMT_GENERIC_5, DIG, id), \
+	SRI(AFMT_GENERIC_6, DIG, id), \
+	SRI(AFMT_GENERIC_7, DIG, id), \
+	SRI(AFMT_GENERIC_HDR, DIG, id), \
+	SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
+	SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+	SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
+	SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
+	SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
+	SRI(AFMT_60958_0, DIG, id), \
+	SRI(AFMT_60958_1, DIG, id), \
+	SRI(AFMT_60958_2, DIG, id), \
+	SRI(DIG_FE_CNTL, DIG, id), \
+	SRI(HDMI_CONTROL, DIG, id), \
+	SRI(HDMI_GC, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+	SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+	SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+	SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+	SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+	SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+	SRI(HDMI_ACR_32_0, DIG, id),\
+	SRI(HDMI_ACR_32_1, DIG, id),\
+	SRI(HDMI_ACR_44_0, DIG, id),\
+	SRI(HDMI_ACR_44_1, DIG, id),\
+	SRI(HDMI_ACR_48_0, DIG, id),\
+	SRI(HDMI_ACR_48_1, DIG, id),\
+	SRI(TMDS_CNTL, DIG, id), \
+	SRI(DP_MSE_RATE_CNTL, DP, id), \
+	SRI(DP_MSE_RATE_UPDATE, DP, id), \
+	SRI(DP_PIXEL_FORMAT, DP, id), \
+	SRI(DP_SEC_CNTL, DP, id), \
+	SRI(DP_STEER_FIFO, DP, id), \
+	SRI(DP_VID_M, DP, id), \
+	SRI(DP_VID_N, DP, id), \
+	SRI(DP_VID_STREAM_CNTL, DP, id), \
+	SRI(DP_VID_TIMING, DP, id), \
+	SRI(DP_SEC_AUD_N, DP, id), \
+	SRI(DP_SEC_TIMESTAMP, DP, id)
+
+#define SE_COMMON_REG_LIST(id)\
+	SE_COMMON_REG_LIST_DCE_BASE(id), \
+	SRI(AFMT_CNTL, DIG, id)
+
+#define SE_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+	SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+	SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+	SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+	SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+	SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+	SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+	SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+	SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+	SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+	SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+	SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+	SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+	SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+	SE_SF(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+	SE_SF(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+	SE_SF(DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+	SE_SF(DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+	SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+	SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+	SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+	SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+	SE_SF(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+	SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+	SE_SF(HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+	SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+	SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+	SE_SF(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+	SE_SF(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+	SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+	SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+	SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+	SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+	SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+	SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+	SE_SF(DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+	SE_SF(DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+	SE_SF(DP_VID_N, DP_VID_N, mask_sh),\
+	SE_SF(DP_VID_M, DP_VID_M, mask_sh),\
+	SE_SF(DIG_FE_CNTL, DIG_START, mask_sh),\
+	SE_SF(AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+	SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+	SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+	SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+	SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+	SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+	SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+	SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+	SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+	SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+	SE_SF(HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+	SE_SF(HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+	SE_SF(HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+	SE_SF(HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+	SE_SF(HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+	SE_SF(HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+	SE_SF(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+	SE_SF(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+	SE_SF(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+	SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+	SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+	SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+	SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+	SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+	SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+	SE_SF(DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+	SE_SF(DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+	SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+	SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
+	SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\
+	SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+	SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+	SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+	SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+	SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+	SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+	SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE112(mask_sh)\
+	SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+	SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+	SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+	SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+	SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+	SE_SF(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+struct dce_stream_encoder_shift {
+	uint8_t AFMT_GENERIC_INDEX;
+	uint8_t AFMT_GENERIC0_UPDATE;
+	uint8_t AFMT_GENERIC2_UPDATE;
+	uint8_t AFMT_GENERIC_HB0;
+	uint8_t AFMT_GENERIC_HB1;
+	uint8_t AFMT_GENERIC_HB2;
+	uint8_t AFMT_GENERIC_HB3;
+	uint8_t AFMT_GENERIC_LOCK_STATUS;
+	uint8_t AFMT_GENERIC_CONFLICT;
+	uint8_t AFMT_GENERIC_CONFLICT_CLR;
+	uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC0_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC1_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC2_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC3_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC4_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC5_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC6_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC7_FRAME_UPDATE;
+	uint8_t HDMI_GENERIC0_CONT;
+	uint8_t HDMI_GENERIC0_SEND;
+	uint8_t HDMI_GENERIC0_LINE;
+	uint8_t HDMI_GENERIC1_CONT;
+	uint8_t HDMI_GENERIC1_SEND;
+	uint8_t HDMI_GENERIC1_LINE;
+	uint8_t DP_PIXEL_ENCODING;
+	uint8_t DP_COMPONENT_DEPTH;
+	uint8_t DP_DYN_RANGE;
+	uint8_t DP_YCBCR_RANGE;
+	uint8_t HDMI_PACKET_GEN_VERSION;
+	uint8_t HDMI_KEEPOUT_MODE;
+	uint8_t HDMI_DEEP_COLOR_ENABLE;
+	uint8_t HDMI_CLOCK_CHANNEL_RATE;
+	uint8_t HDMI_DEEP_COLOR_DEPTH;
+	uint8_t HDMI_GC_CONT;
+	uint8_t HDMI_GC_SEND;
+	uint8_t HDMI_NULL_SEND;
+	uint8_t HDMI_DATA_SCRAMBLE_EN;
+	uint8_t HDMI_AUDIO_INFO_SEND;
+	uint8_t AFMT_AUDIO_INFO_UPDATE;
+	uint8_t HDMI_AUDIO_INFO_LINE;
+	uint8_t HDMI_GC_AVMUTE;
+	uint8_t DP_MSE_RATE_X;
+	uint8_t DP_MSE_RATE_Y;
+	uint8_t DP_MSE_RATE_UPDATE_PENDING;
+	uint8_t AFMT_AVI_INFO_VERSION;
+	uint8_t HDMI_AVI_INFO_SEND;
+	uint8_t HDMI_AVI_INFO_CONT;
+	uint8_t HDMI_AVI_INFO_LINE;
+	uint8_t DP_SEC_GSP0_ENABLE;
+	uint8_t DP_SEC_STREAM_ENABLE;
+	uint8_t DP_SEC_GSP1_ENABLE;
+	uint8_t DP_SEC_GSP2_ENABLE;
+	uint8_t DP_SEC_GSP3_ENABLE;
+	uint8_t DP_SEC_GSP4_ENABLE;
+	uint8_t DP_SEC_GSP5_ENABLE;
+	uint8_t DP_SEC_GSP6_ENABLE;
+	uint8_t DP_SEC_GSP7_ENABLE;
+	uint8_t DP_SEC_AVI_ENABLE;
+	uint8_t DP_SEC_MPG_ENABLE;
+	uint8_t DP_VID_STREAM_DIS_DEFER;
+	uint8_t DP_VID_STREAM_ENABLE;
+	uint8_t DP_VID_STREAM_STATUS;
+	uint8_t DP_STEER_FIFO_RESET;
+	uint8_t DP_VID_M_N_GEN_EN;
+	uint8_t DP_VID_N;
+	uint8_t DP_VID_M;
+	uint8_t DIG_START;
+	uint8_t AFMT_AUDIO_SRC_SELECT;
+	uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
+	uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
+	uint8_t HDMI_AUDIO_DELAY_EN;
+	uint8_t AFMT_60958_CS_UPDATE;
+	uint8_t AFMT_AUDIO_LAYOUT_OVRD;
+	uint8_t AFMT_60958_OSF_OVRD;
+	uint8_t HDMI_ACR_AUTO_SEND;
+	uint8_t HDMI_ACR_SOURCE;
+	uint8_t HDMI_ACR_AUDIO_PRIORITY;
+	uint8_t HDMI_ACR_CTS_32;
+	uint8_t HDMI_ACR_N_32;
+	uint8_t HDMI_ACR_CTS_44;
+	uint8_t HDMI_ACR_N_44;
+	uint8_t HDMI_ACR_CTS_48;
+	uint8_t HDMI_ACR_N_48;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+	uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+	uint8_t DP_SEC_AUD_N;
+	uint8_t DP_SEC_TIMESTAMP_MODE;
+	uint8_t DP_SEC_ASP_ENABLE;
+	uint8_t DP_SEC_ATP_ENABLE;
+	uint8_t DP_SEC_AIP_ENABLE;
+	uint8_t DP_SEC_ACM_ENABLE;
+	uint8_t AFMT_AUDIO_SAMPLE_SEND;
+	uint8_t AFMT_AUDIO_CLOCK_EN;
+	uint8_t TMDS_PIXEL_ENCODING;
+	uint8_t TMDS_COLOR_FORMAT;
+	uint8_t DP_DB_DISABLE;
+	uint8_t DP_MSA_MISC0;
+	uint8_t DP_MSA_HTOTAL;
+	uint8_t DP_MSA_VTOTAL;
+	uint8_t DP_MSA_HSTART;
+	uint8_t DP_MSA_VSTART;
+	uint8_t DP_MSA_HSYNCWIDTH;
+	uint8_t DP_MSA_HSYNCPOLARITY;
+	uint8_t DP_MSA_VSYNCWIDTH;
+	uint8_t DP_MSA_VSYNCPOLARITY;
+	uint8_t DP_MSA_HWIDTH;
+	uint8_t DP_MSA_VHEIGHT;
+	uint8_t HDMI_DB_DISABLE;
+	uint8_t DP_VID_N_MUL;
+	uint8_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce_stream_encoder_mask {
+	uint32_t AFMT_GENERIC_INDEX;
+	uint32_t AFMT_GENERIC0_UPDATE;
+	uint32_t AFMT_GENERIC2_UPDATE;
+	uint32_t AFMT_GENERIC_HB0;
+	uint32_t AFMT_GENERIC_HB1;
+	uint32_t AFMT_GENERIC_HB2;
+	uint32_t AFMT_GENERIC_HB3;
+	uint32_t AFMT_GENERIC_LOCK_STATUS;
+	uint32_t AFMT_GENERIC_CONFLICT;
+	uint32_t AFMT_GENERIC_CONFLICT_CLR;
+	uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC0_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC1_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC2_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC3_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC4_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC5_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC6_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC7_FRAME_UPDATE;
+	uint32_t HDMI_GENERIC0_CONT;
+	uint32_t HDMI_GENERIC0_SEND;
+	uint32_t HDMI_GENERIC0_LINE;
+	uint32_t HDMI_GENERIC1_CONT;
+	uint32_t HDMI_GENERIC1_SEND;
+	uint32_t HDMI_GENERIC1_LINE;
+	uint32_t DP_PIXEL_ENCODING;
+	uint32_t DP_COMPONENT_DEPTH;
+	uint32_t DP_DYN_RANGE;
+	uint32_t DP_YCBCR_RANGE;
+	uint32_t HDMI_PACKET_GEN_VERSION;
+	uint32_t HDMI_KEEPOUT_MODE;
+	uint32_t HDMI_DEEP_COLOR_ENABLE;
+	uint32_t HDMI_CLOCK_CHANNEL_RATE;
+	uint32_t HDMI_DEEP_COLOR_DEPTH;
+	uint32_t HDMI_GC_CONT;
+	uint32_t HDMI_GC_SEND;
+	uint32_t HDMI_NULL_SEND;
+	uint32_t HDMI_DATA_SCRAMBLE_EN;
+	uint32_t HDMI_AUDIO_INFO_SEND;
+	uint32_t AFMT_AUDIO_INFO_UPDATE;
+	uint32_t HDMI_AUDIO_INFO_LINE;
+	uint32_t HDMI_GC_AVMUTE;
+	uint32_t DP_MSE_RATE_X;
+	uint32_t DP_MSE_RATE_Y;
+	uint32_t DP_MSE_RATE_UPDATE_PENDING;
+	uint32_t AFMT_AVI_INFO_VERSION;
+	uint32_t HDMI_AVI_INFO_SEND;
+	uint32_t HDMI_AVI_INFO_CONT;
+	uint32_t HDMI_AVI_INFO_LINE;
+	uint32_t DP_SEC_GSP0_ENABLE;
+	uint32_t DP_SEC_STREAM_ENABLE;
+	uint32_t DP_SEC_GSP1_ENABLE;
+	uint32_t DP_SEC_GSP2_ENABLE;
+	uint32_t DP_SEC_GSP3_ENABLE;
+	uint32_t DP_SEC_GSP4_ENABLE;
+	uint32_t DP_SEC_GSP5_ENABLE;
+	uint32_t DP_SEC_GSP6_ENABLE;
+	uint32_t DP_SEC_GSP7_ENABLE;
+	uint32_t DP_SEC_AVI_ENABLE;
+	uint32_t DP_SEC_MPG_ENABLE;
+	uint32_t DP_VID_STREAM_DIS_DEFER;
+	uint32_t DP_VID_STREAM_ENABLE;
+	uint32_t DP_VID_STREAM_STATUS;
+	uint32_t DP_STEER_FIFO_RESET;
+	uint32_t DP_VID_M_N_GEN_EN;
+	uint32_t DP_VID_N;
+	uint32_t DP_VID_M;
+	uint32_t DIG_START;
+	uint32_t AFMT_AUDIO_SRC_SELECT;
+	uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
+	uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
+	uint32_t HDMI_AUDIO_DELAY_EN;
+	uint32_t AFMT_60958_CS_UPDATE;
+	uint32_t AFMT_AUDIO_LAYOUT_OVRD;
+	uint32_t AFMT_60958_OSF_OVRD;
+	uint32_t HDMI_ACR_AUTO_SEND;
+	uint32_t HDMI_ACR_SOURCE;
+	uint32_t HDMI_ACR_AUDIO_PRIORITY;
+	uint32_t HDMI_ACR_CTS_32;
+	uint32_t HDMI_ACR_N_32;
+	uint32_t HDMI_ACR_CTS_44;
+	uint32_t HDMI_ACR_N_44;
+	uint32_t HDMI_ACR_CTS_48;
+	uint32_t HDMI_ACR_N_48;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+	uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+	uint32_t DP_SEC_AUD_N;
+	uint32_t DP_SEC_TIMESTAMP_MODE;
+	uint32_t DP_SEC_ASP_ENABLE;
+	uint32_t DP_SEC_ATP_ENABLE;
+	uint32_t DP_SEC_AIP_ENABLE;
+	uint32_t DP_SEC_ACM_ENABLE;
+	uint32_t AFMT_AUDIO_SAMPLE_SEND;
+	uint32_t AFMT_AUDIO_CLOCK_EN;
+	uint32_t TMDS_PIXEL_ENCODING;
+	uint32_t TMDS_COLOR_FORMAT;
+	uint32_t DP_DB_DISABLE;
+	uint32_t DP_MSA_MISC0;
+	uint32_t DP_MSA_HTOTAL;
+	uint32_t DP_MSA_VTOTAL;
+	uint32_t DP_MSA_HSTART;
+	uint32_t DP_MSA_VSTART;
+	uint32_t DP_MSA_HSYNCWIDTH;
+	uint32_t DP_MSA_HSYNCPOLARITY;
+	uint32_t DP_MSA_VSYNCWIDTH;
+	uint32_t DP_MSA_VSYNCPOLARITY;
+	uint32_t DP_MSA_HWIDTH;
+	uint32_t DP_MSA_VHEIGHT;
+	uint32_t HDMI_DB_DISABLE;
+	uint32_t DP_VID_N_MUL;
+	uint32_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce110_stream_enc_registers {
+	uint32_t AFMT_CNTL;
+	uint32_t AFMT_AVI_INFO0;
+	uint32_t AFMT_AVI_INFO1;
+	uint32_t AFMT_AVI_INFO2;
+	uint32_t AFMT_AVI_INFO3;
+	uint32_t AFMT_GENERIC_0;
+	uint32_t AFMT_GENERIC_1;
+	uint32_t AFMT_GENERIC_2;
+	uint32_t AFMT_GENERIC_3;
+	uint32_t AFMT_GENERIC_4;
+	uint32_t AFMT_GENERIC_5;
+	uint32_t AFMT_GENERIC_6;
+	uint32_t AFMT_GENERIC_7;
+	uint32_t AFMT_GENERIC_HDR;
+	uint32_t AFMT_INFOFRAME_CONTROL0;
+	uint32_t AFMT_VBI_PACKET_CONTROL;
+	uint32_t AFMT_VBI_PACKET_CONTROL1;
+	uint32_t AFMT_AUDIO_PACKET_CONTROL;
+	uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+	uint32_t AFMT_AUDIO_SRC_CONTROL;
+	uint32_t AFMT_60958_0;
+	uint32_t AFMT_60958_1;
+	uint32_t AFMT_60958_2;
+	uint32_t DIG_FE_CNTL;
+	uint32_t DP_MSE_RATE_CNTL;
+	uint32_t DP_MSE_RATE_UPDATE;
+	uint32_t DP_PIXEL_FORMAT;
+	uint32_t DP_SEC_CNTL;
+	uint32_t DP_STEER_FIFO;
+	uint32_t DP_VID_M;
+	uint32_t DP_VID_N;
+	uint32_t DP_VID_STREAM_CNTL;
+	uint32_t DP_VID_TIMING;
+	uint32_t DP_SEC_AUD_N;
+	uint32_t DP_SEC_TIMESTAMP;
+	uint32_t HDMI_CONTROL;
+	uint32_t HDMI_GC;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+	uint32_t HDMI_INFOFRAME_CONTROL0;
+	uint32_t HDMI_INFOFRAME_CONTROL1;
+	uint32_t HDMI_VBI_PACKET_CONTROL;
+	uint32_t HDMI_AUDIO_PACKET_CONTROL;
+	uint32_t HDMI_ACR_PACKET_CONTROL;
+	uint32_t HDMI_ACR_32_0;
+	uint32_t HDMI_ACR_32_1;
+	uint32_t HDMI_ACR_44_0;
+	uint32_t HDMI_ACR_44_1;
+	uint32_t HDMI_ACR_48_0;
+	uint32_t HDMI_ACR_48_1;
+	uint32_t TMDS_CNTL;
+};
+
+struct dce110_stream_encoder {
+	struct stream_encoder base;
+	const struct dce110_stream_enc_registers *regs;
+	const struct dce_stream_encoder_shift *se_shift;
+	const struct dce_stream_encoder_mask *se_mask;
+};
+
+bool dce110_stream_encoder_construct(
+	struct dce110_stream_encoder *enc110,
+	struct dc_context *ctx,
+	struct dc_bios *bp,
+	enum engine_id eng_id,
+	const struct dce110_stream_enc_registers *regs,
+	const struct dce_stream_encoder_shift *se_shift,
+	const struct dce_stream_encoder_mask *se_mask);
+
+
+void dce110_se_audio_mute_control(
+	struct stream_encoder *enc, bool mute);
+
+void dce110_se_dp_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info);
+
+void dce110_se_dp_audio_enable(
+		struct stream_encoder *enc);
+
+void dce110_se_dp_audio_disable(
+		struct stream_encoder *enc);
+
+void dce110_se_hdmi_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info,
+	struct audio_crtc_info *audio_crtc_info);
+
+void dce110_se_hdmi_audio_disable(
+	struct stream_encoder *enc);
+
+#endif /* __DC_STREAM_ENCODER_DCE110_H__ */

+ 1002 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_transform.c

@@ -0,0 +1,1002 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_transform.h"
+#include "reg_helper.h"
+#include "opp.h"
+#include "basics/conversion.h"
+
+#define REG(reg) \
+	(xfm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	xfm_dce->xfm_shift->field_name, xfm_dce->xfm_mask->field_name
+
+#define CTX \
+	xfm_dce->base.ctx
+
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+#define GAMUT_MATRIX_SIZE 12
+#define SCL_PHASES 16
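+
+/* A ratio is in U2.19 fixed point after dal_fixed31_32_u2d19(), so exactly
+ * 1.0 is 1 << 19 (0x80000); IDENTITY_RATIO() is therefore true only for an
+ * unscaled (1:1) axis.
+ */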
+
+enum dcp_out_trunc_round_mode {
+	DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+	DCP_OUT_TRUNC_ROUND_MODE_ROUND
+};
+
+enum dcp_out_trunc_round_depth {
+	DCP_OUT_TRUNC_ROUND_DEPTH_14BIT,
+	DCP_OUT_TRUNC_ROUND_DEPTH_13BIT,
+	DCP_OUT_TRUNC_ROUND_DEPTH_12BIT,
+	DCP_OUT_TRUNC_ROUND_DEPTH_11BIT,
+	DCP_OUT_TRUNC_ROUND_DEPTH_10BIT,
+	DCP_OUT_TRUNC_ROUND_DEPTH_9BIT,
+	DCP_OUT_TRUNC_ROUND_DEPTH_8BIT
+};
+
+/*  defines the various methods of bit reduction available for use */
+enum dcp_bit_depth_reduction_mode {
+	DCP_BIT_DEPTH_REDUCTION_MODE_DITHER,
+	DCP_BIT_DEPTH_REDUCTION_MODE_ROUND,
+	DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE,
+	DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED,
+	DCP_BIT_DEPTH_REDUCTION_MODE_INVALID
+};
+
+enum dcp_spatial_dither_mode {
+	DCP_SPATIAL_DITHER_MODE_AAAA,
+	DCP_SPATIAL_DITHER_MODE_A_AA_A,
+	DCP_SPATIAL_DITHER_MODE_AABBAABB,
+	DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC,
+	DCP_SPATIAL_DITHER_MODE_INVALID
+};
+
+enum dcp_spatial_dither_depth {
+	DCP_SPATIAL_DITHER_DEPTH_30BPP,
+	DCP_SPATIAL_DITHER_DEPTH_24BPP
+};
+
+static bool setup_scaling_configuration(
+	struct dce_transform *xfm_dce,
+	const struct scaler_data *data)
+{
+	struct dc_context *ctx = xfm_dce->base.ctx;
+
+	if (data->taps.h_taps + data->taps.v_taps <= 2) {
+		/* Set bypass */
+		REG_UPDATE_2(SCL_MODE, SCL_MODE, 0, SCL_PSCL_EN, 0);
+		return false;
+	}
+
+	REG_SET_2(SCL_TAP_CONTROL, 0,
+			SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+			SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+
+	if (data->format <= PIXEL_FORMAT_GRPH_END)
+		REG_UPDATE_2(SCL_MODE, SCL_MODE, 1, SCL_PSCL_EN, 1);
+	else
+		REG_UPDATE_2(SCL_MODE, SCL_MODE, 2, SCL_PSCL_EN, 1);
+
+	/* 1 - Replace out of bound pixels with edge */
+	REG_SET(SCL_CONTROL, 0, SCL_BOUNDARY_MODE, 1);
+
+	return true;
+}
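+
+/* Summary of the SCL_MODE programming above (illustrative): 0 bypasses the
+ * scaler entirely (two or fewer total taps), 1 is used for graphics (RGB)
+ * surface formats and 2 for the remaining (video) formats, with the
+ * per-pixel scaler (PSCL) enabled in both scaling modes.
+ */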
+
+static void program_overscan(
+		struct dce_transform *xfm_dce,
+		const struct scaler_data *data)
+{
+	int overscan_right = data->h_active
+			- data->recout.x - data->recout.width;
+	int overscan_bottom = data->v_active
+			- data->recout.y - data->recout.height;
+
+	if (overscan_right < 0) {
+		BREAK_TO_DEBUGGER();
+		overscan_right = 0;
+	}
+	if (overscan_bottom < 0) {
+		BREAK_TO_DEBUGGER();
+		overscan_bottom = 0;
+	}
+
+	REG_SET_2(EXT_OVERSCAN_LEFT_RIGHT, 0,
+			EXT_OVERSCAN_LEFT, data->recout.x,
+			EXT_OVERSCAN_RIGHT, overscan_right);
+	REG_SET_2(EXT_OVERSCAN_TOP_BOTTOM, 0,
+			EXT_OVERSCAN_TOP, data->recout.y,
+			EXT_OVERSCAN_BOTTOM, overscan_bottom);
+}
+
+static void program_multi_taps_filter(
+	struct dce_transform *xfm_dce,
+	int taps,
+	const uint16_t *coeffs,
+	enum ram_filter_type filter_type)
+{
+	int phase, pair;
+	int array_idx = 0;
+	int taps_pairs = (taps + 1) / 2;
+	int phases_to_program = SCL_PHASES / 2 + 1;
+
+	uint32_t power_ctl = 0;
+
+	if (!coeffs)
+		return;
+
+	/* We need to disable power gating on coeff memory to do programming */
+	if (REG(DCFE_MEM_PWR_CTRL)) {
+		power_ctl = REG_READ(DCFE_MEM_PWR_CTRL);
+		REG_SET(DCFE_MEM_PWR_CTRL, power_ctl, SCL_COEFF_MEM_PWR_DIS, 1);
+
+		REG_WAIT(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, 0, 1, 10);
+	}
+	for (phase = 0; phase < phases_to_program; phase++) {
+		/* We always program N/2 + 1 phases (N total phases); the
+		 * remaining N/2 - 1 are mirrors. Phase 0 is unique, and
+		 * phase N/2 is unique if N is even. */
+		for (pair = 0; pair < taps_pairs; pair++) {
+			uint16_t odd_coeff = 0;
+			uint16_t even_coeff = coeffs[array_idx];
+
+			REG_SET_3(SCL_COEF_RAM_SELECT, 0,
+					SCL_C_RAM_FILTER_TYPE, filter_type,
+					SCL_C_RAM_PHASE, phase,
+					SCL_C_RAM_TAP_PAIR_IDX, pair);
+
+			if (taps % 2 && pair == taps_pairs - 1)
+				array_idx++;
+			else {
+				odd_coeff = coeffs[array_idx + 1];
+				array_idx += 2;
+			}
+
+			REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+					SCL_C_RAM_EVEN_TAP_COEF_EN, 1,
+					SCL_C_RAM_EVEN_TAP_COEF, even_coeff,
+					SCL_C_RAM_ODD_TAP_COEF_EN, 1,
+					SCL_C_RAM_ODD_TAP_COEF, odd_coeff);
+		}
+	}
+
+	/* We need to restore power gating on coeff memory to initial state */
+	if (REG(DCFE_MEM_PWR_CTRL))
+		REG_WRITE(DCFE_MEM_PWR_CTRL, power_ctl);
+}
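+
+/* Worked example (illustrative): with SCL_PHASES = 16 and taps = 4 the loop
+ * above programs phases_to_program = 9 phases (0..8; the rest are mirrors)
+ * and taps_pairs = 2 coefficient pairs per phase, consuming four coeffs[]
+ * entries per phase. For an odd tap count the last pair carries only the
+ * even coefficient and the odd one is written as 0.
+ */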
+
+static void program_viewport(
+	struct dce_transform *xfm_dce,
+	const struct rect *view_port)
+{
+	REG_SET_2(VIEWPORT_START, 0,
+			VIEWPORT_X_START, view_port->x,
+			VIEWPORT_Y_START, view_port->y);
+
+	REG_SET_2(VIEWPORT_SIZE, 0,
+			VIEWPORT_HEIGHT, view_port->height,
+			VIEWPORT_WIDTH, view_port->width);
+
+	/* TODO: add stereo support */
+}
+
+static void calculate_inits(
+	struct dce_transform *xfm_dce,
+	const struct scaler_data *data,
+	struct scl_ratios_inits *inits)
+{
+	struct fixed31_32 h_init;
+	struct fixed31_32 v_init;
+
+	inits->h_int_scale_ratio =
+		dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+	inits->v_int_scale_ratio =
+		dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+
+	h_init =
+		dal_fixed31_32_div_int(
+			dal_fixed31_32_add(
+				data->ratios.horz,
+				dal_fixed31_32_from_int(data->taps.h_taps + 1)),
+				2);
+	inits->h_init.integer = dal_fixed31_32_floor(h_init);
+	inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
+
+	v_init =
+		dal_fixed31_32_div_int(
+			dal_fixed31_32_add(
+				data->ratios.vert,
+				dal_fixed31_32_from_int(data->taps.v_taps + 1)),
+				2);
+	inits->v_init.integer = dal_fixed31_32_floor(v_init);
+	inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
+}
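+
+/* Worked example (illustrative): for a horizontal ratio of 1.5 with 4 taps,
+ * h_init = (1.5 + (4 + 1)) / 2 = 3.25, so h_init.integer is 3 and
+ * h_init.fraction holds 0.25 in U0.19 shifted left by 5, matching the << 5
+ * applied to the U2.19 scale ratios above.
+ */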
+
+static void program_scl_ratios_inits(
+	struct dce_transform *xfm_dce,
+	struct scl_ratios_inits *inits)
+{
+	REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+			SCL_H_SCALE_RATIO, inits->h_int_scale_ratio);
+
+	REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+			SCL_V_SCALE_RATIO, inits->v_int_scale_ratio);
+
+	REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+			SCL_H_INIT_INT, inits->h_init.integer,
+			SCL_H_INIT_FRAC, inits->h_init.fraction);
+
+	REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+			SCL_V_INIT_INT, inits->v_init.integer,
+			SCL_V_INIT_FRAC, inits->v_init.fraction);
+
+	REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+}
+
+static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
+{
+	if (taps == 4)
+		return get_filter_4tap_16p(ratio);
+	else if (taps == 3)
+		return get_filter_3tap_16p(ratio);
+	else if (taps == 2)
+		return filter_2tap_16p;
+	else if (taps == 1)
+		return NULL;
+	else {
+		/* should never happen, bug */
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+}
+
+static void dce_transform_set_scaler(
+	struct transform *xfm,
+	const struct scaler_data *data)
+{
+	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+	bool is_scaling_required;
+	bool filter_updated = false;
+	const uint16_t *coeffs_v, *coeffs_h;
+
+	/* Use all three pieces of memory always */
+	REG_SET_2(LB_MEMORY_CTRL, 0,
+			LB_MEMORY_CONFIG, 0,
+			LB_MEMORY_SIZE, xfm_dce->lb_memory_size);
+
+	/* 1. Program overscan */
+	program_overscan(xfm_dce, data);
+
+	/* 2. Program taps and configuration */
+	is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+	if (is_scaling_required) {
+		/* 3. Calculate and program ratio, filter initialization */
+		struct scl_ratios_inits inits = { 0 };
+
+		calculate_inits(xfm_dce, data, &inits);
+
+		program_scl_ratios_inits(xfm_dce, &inits);
+
+		coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert);
+		coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz);
+
+		if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+			/* 4. Program vertical filters */
+			if (xfm_dce->filter_v == NULL)
+				REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+						SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+			program_multi_taps_filter(
+					xfm_dce,
+					data->taps.v_taps,
+					coeffs_v,
+					FILTER_TYPE_RGB_Y_VERTICAL);
+			program_multi_taps_filter(
+					xfm_dce,
+					data->taps.v_taps,
+					coeffs_v,
+					FILTER_TYPE_ALPHA_VERTICAL);
+
+			/* 5. Program horizontal filters */
+			if (xfm_dce->filter_h == NULL)
+				REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+						SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+			program_multi_taps_filter(
+					xfm_dce,
+					data->taps.h_taps,
+					coeffs_h,
+					FILTER_TYPE_RGB_Y_HORIZONTAL);
+			program_multi_taps_filter(
+					xfm_dce,
+					data->taps.h_taps,
+					coeffs_h,
+					FILTER_TYPE_ALPHA_HORIZONTAL);
+
+			xfm_dce->filter_v = coeffs_v;
+			xfm_dce->filter_h = coeffs_h;
+			filter_updated = true;
+		}
+	}
+
+	/* 6. Program the viewport */
+	program_viewport(xfm_dce, &data->viewport);
+
+	/* 7. Set bit to flip to new coefficient memory */
+	if (filter_updated)
+		REG_UPDATE(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, 1);
+
+	REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en);
+}
+
+/*****************************************************************************
+ * set_clamp
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ * @brief
+ *     Programs clamp according to panel bit depth.
+ *
+ *******************************************************************************/
+static void set_clamp(
+	struct dce_transform *xfm_dce,
+	enum dc_color_depth depth)
+{
+	int clamp_max = 0;
+
+	/* At the clamp block the data will be MSB aligned, so we set the max
+	 * clamp accordingly.
+	 * For example, the max value for 6 bits MSB aligned (14 bit bus) would
+	 * be "11 1111 0000 0000" in binary, so 0x3F00.
+	 */
+	switch (depth) {
+	case COLOR_DEPTH_666:
+		/* 6bit MSB aligned on 14 bit bus '11 1111 0000 0000' */
+		clamp_max = 0x3F00;
+		break;
+	case COLOR_DEPTH_888:
+		/* 8bit MSB aligned on 14 bit bus '11 1111 1100 0000' */
+		clamp_max = 0x3FC0;
+		break;
+	case COLOR_DEPTH_101010:
+		/* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+		clamp_max = 0x3FFC;
+		break;
+	case COLOR_DEPTH_121212:
+		/* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+		clamp_max = 0x3FFF;
+		break;
+	default:
+		clamp_max = 0x3FC0;
+		BREAK_TO_DEBUGGER(); /* Invalid clamp bit depth */
+	}
+	REG_SET_2(OUT_CLAMP_CONTROL_B_CB, 0,
+			OUT_CLAMP_MIN_B_CB, 0,
+			OUT_CLAMP_MAX_B_CB, clamp_max);
+
+	REG_SET_2(OUT_CLAMP_CONTROL_G_Y, 0,
+			OUT_CLAMP_MIN_G_Y, 0,
+			OUT_CLAMP_MAX_G_Y, clamp_max);
+
+	REG_SET_2(OUT_CLAMP_CONTROL_R_CR, 0,
+			OUT_CLAMP_MIN_R_CR, 0,
+			OUT_CLAMP_MAX_R_CR, clamp_max);
+}
+
+/*******************************************************************************
+ * set_round
+ *
+ * @brief
+ *     Programs Round/Truncate
+ *
+ * @param [in] mode  :round or truncate
+ * @param [in] depth :bit depth to round/truncate to
+ OUT_ROUND_TRUNC_MODE 3:0 0xA Output data round or truncate mode
+ POSSIBLE VALUES:
+      00 - truncate to u0.12
+      01 - truncate to u0.11
+      02 - truncate to u0.10
+      03 - truncate to u0.9
+      04 - truncate to u0.8
+      05 - reserved
+      06 - truncate to u0.14
+      07 - truncate to u0.13
+      08 - round to u0.12
+      09 - round to u0.11
+      10 - round to u0.10
+      11 - round to u0.9
+      12 - round to u0.8
+      13 - reserved
+      14 - round to u0.14
+      15 - round to u0.13
+
+ ******************************************************************************/
+static void set_round(
+	struct dce_transform *xfm_dce,
+	enum dcp_out_trunc_round_mode mode,
+	enum dcp_out_trunc_round_depth depth)
+{
+	int depth_bits = 0;
+	int mode_bit = 0;
+
+	/*  set up bit depth */
+	switch (depth) {
+	case DCP_OUT_TRUNC_ROUND_DEPTH_14BIT:
+		depth_bits = 6;
+		break;
+	case DCP_OUT_TRUNC_ROUND_DEPTH_13BIT:
+		depth_bits = 7;
+		break;
+	case DCP_OUT_TRUNC_ROUND_DEPTH_12BIT:
+		depth_bits = 0;
+		break;
+	case DCP_OUT_TRUNC_ROUND_DEPTH_11BIT:
+		depth_bits = 1;
+		break;
+	case DCP_OUT_TRUNC_ROUND_DEPTH_10BIT:
+		depth_bits = 2;
+		break;
+	case DCP_OUT_TRUNC_ROUND_DEPTH_9BIT:
+		depth_bits = 3;
+		break;
+	case DCP_OUT_TRUNC_ROUND_DEPTH_8BIT:
+		depth_bits = 4;
+		break;
+	default:
+		depth_bits = 4;
+		BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_depth */
+	}
+
+	/*  set up round or truncate */
+	switch (mode) {
+	case DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE:
+		mode_bit = 0;
+		break;
+	case DCP_OUT_TRUNC_ROUND_MODE_ROUND:
+		mode_bit = 1;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_mode */
+	}
+
+	depth_bits |= mode_bit << 3;
+
+	REG_SET(OUT_ROUND_CONTROL, 0, OUT_ROUND_TRUNC_MODE, depth_bits);
+}
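+
+/* Example (illustrative): "round to u0.10" is depth_bits = 2 with
+ * mode_bit = 1, so the packed value is 2 | (1 << 3) = 10, matching entry
+ * "10 - round to u0.10" in the OUT_ROUND_TRUNC_MODE table above.
+ */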
+
+/*****************************************************************************
+ * set_dither
+ *
+ * @brief
+ *     Programs Dither
+ *
+ * @param [in] dither_enable        : enable dither
+ * @param [in] dither_mode           : dither mode to set
+ * @param [in] dither_depth          : bit depth to dither to
+ * @param [in] frame_random_enable    : enable frame random
+ * @param [in] rgb_random_enable      : enable rgb random
+ * @param [in] highpass_random_enable : enable highpass random
+ *
+ ******************************************************************************/
+
+static void set_dither(
+	struct dce_transform *xfm_dce,
+	bool dither_enable,
+	enum dcp_spatial_dither_mode dither_mode,
+	enum dcp_spatial_dither_depth dither_depth,
+	bool frame_random_enable,
+	bool rgb_random_enable,
+	bool highpass_random_enable)
+{
+	int dither_depth_bits = 0;
+	int dither_mode_bits = 0;
+
+	switch (dither_mode) {
+	case DCP_SPATIAL_DITHER_MODE_AAAA:
+		dither_mode_bits = 0;
+		break;
+	case DCP_SPATIAL_DITHER_MODE_A_AA_A:
+		dither_mode_bits = 1;
+		break;
+	case DCP_SPATIAL_DITHER_MODE_AABBAABB:
+		dither_mode_bits = 2;
+		break;
+	case DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC:
+		dither_mode_bits = 3;
+		break;
+	default:
+		/* Invalid dcp_spatial_dither_mode */
+		BREAK_TO_DEBUGGER();
+	}
+
+	switch (dither_depth) {
+	case DCP_SPATIAL_DITHER_DEPTH_30BPP:
+		dither_depth_bits = 0;
+		break;
+	case DCP_SPATIAL_DITHER_DEPTH_24BPP:
+		dither_depth_bits = 1;
+		break;
+	default:
+		/* Invalid dcp_spatial_dither_depth */
+		BREAK_TO_DEBUGGER();
+	}
+
+	/*  write the register */
+	REG_SET_6(DCP_SPATIAL_DITHER_CNTL, 0,
+			DCP_SPATIAL_DITHER_EN, dither_enable,
+			DCP_SPATIAL_DITHER_MODE, dither_mode_bits,
+			DCP_SPATIAL_DITHER_DEPTH, dither_depth_bits,
+			DCP_FRAME_RANDOM_ENABLE, frame_random_enable,
+			DCP_RGB_RANDOM_ENABLE, rgb_random_enable,
+			DCP_HIGHPASS_RANDOM_ENABLE, highpass_random_enable);
+}
+
+/*****************************************************************************
+ * dce_transform_bit_depth_reduction_program
+ *
+ * @brief
+ *     Programs the DCP bit depth reduction registers (Clamp, Round/Truncate,
+ *      Dither) for dce
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ ******************************************************************************/
+static void program_bit_depth_reduction(
+	struct dce_transform *xfm_dce,
+	enum dc_color_depth depth,
+	const struct bit_depth_reduction_params *bit_depth_params)
+{
+	enum dcp_bit_depth_reduction_mode depth_reduction_mode;
+	enum dcp_spatial_dither_mode spatial_dither_mode;
+	bool frame_random_enable;
+	bool rgb_random_enable;
+	bool highpass_random_enable;
+
+	ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+
+	if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) {
+		depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER;
+		frame_random_enable = true;
+		rgb_random_enable = true;
+		highpass_random_enable = true;
+
+	} else {
+		depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED;
+		frame_random_enable = false;
+		rgb_random_enable = false;
+		highpass_random_enable = false;
+	}
+
+	spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
+
+	set_clamp(xfm_dce, depth);
+
+	switch (depth_reduction_mode) {
+	case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER:
+		/*  Spatial Dither: Set round/truncate to bypass (12bit),
+		 *  enable Dither (30bpp) */
+		set_round(xfm_dce,
+			DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+			DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+		set_dither(xfm_dce, true, spatial_dither_mode,
+			DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+			rgb_random_enable, highpass_random_enable);
+		break;
+	case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
+		/*  Round: Enable round (10bit), disable Dither */
+		set_round(xfm_dce,
+			DCP_OUT_TRUNC_ROUND_MODE_ROUND,
+			DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+		set_dither(xfm_dce, false, spatial_dither_mode,
+			DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+			rgb_random_enable, highpass_random_enable);
+		break;
+	case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /*  Truncate */
+		/*  Truncate: Enable truncate (10bit), disable Dither */
+		set_round(xfm_dce,
+			DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+			DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+		set_dither(xfm_dce, false, spatial_dither_mode,
+			DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+			rgb_random_enable, highpass_random_enable);
+		break;
+
+	case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /*  Disabled */
+		/*  Truncate: Set round/truncate to bypass (12bit),
+		 * disable Dither */
+		set_round(xfm_dce,
+			DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+			DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+		set_dither(xfm_dce, false, spatial_dither_mode,
+			DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+			rgb_random_enable, highpass_random_enable);
+		break;
+	default:
+		/* Invalid DCP Depth reduction mode */
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+}
+
+static int dce_transform_get_max_num_of_supported_lines(
+	struct dce_transform *xfm_dce,
+	enum lb_pixel_depth depth,
+	int pixel_width)
+{
+	int pixels_per_entries = 0;
+	int max_pixels_supports = 0;
+
+	ASSERT(pixel_width);
+
+	/* Find number of pixels that can fit into a single LB entry and
+	 * take floor of the value since we cannot store a single pixel
+	 * across multiple entries. */
+	switch (depth) {
+	case LB_PIXEL_DEPTH_18BPP:
+		pixels_per_entries = xfm_dce->lb_bits_per_entry / 18;
+		break;
+
+	case LB_PIXEL_DEPTH_24BPP:
+		pixels_per_entries = xfm_dce->lb_bits_per_entry / 24;
+		break;
+
+	case LB_PIXEL_DEPTH_30BPP:
+		pixels_per_entries = xfm_dce->lb_bits_per_entry / 30;
+		break;
+
+	case LB_PIXEL_DEPTH_36BPP:
+		pixels_per_entries = xfm_dce->lb_bits_per_entry / 36;
+		break;
+
+	default:
+		dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+			"%s: Invalid LB pixel depth",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+
+	ASSERT(pixels_per_entries);
+
+	max_pixels_supports =
+			pixels_per_entries *
+			xfm_dce->lb_memory_size;
+
+	return (max_pixels_supports / pixel_width);
+}
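+
+/* Worked example (illustrative): with the DCE defaults of 144 bits per LB
+ * entry and 1712 entries (see dce_transform_construct()), 24bpp packs
+ * 144 / 24 = 6 pixels per entry, i.e. 6 * 1712 = 10272 pixels total, which
+ * supports 5 full lines for a 1920-wide pixel width.
+ */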
+
+static void set_denormalization(
+	struct dce_transform *xfm_dce,
+	enum dc_color_depth depth)
+{
+	int denorm_mode = 0;
+
+	switch (depth) {
+	case COLOR_DEPTH_666:
+		/* 63/64 for 6 bit output color depth */
+		denorm_mode = 1;
+		break;
+	case COLOR_DEPTH_888:
+		/* Unity for 8 bit output color depth
+		 * because prescale is disabled by default */
+		denorm_mode = 0;
+		break;
+	case COLOR_DEPTH_101010:
+		/* 1023/1024 for 10 bit output color depth */
+		denorm_mode = 3;
+		break;
+	case COLOR_DEPTH_121212:
+		/* 4095/4096 for 12 bit output color depth */
+		denorm_mode = 5;
+		break;
+	case COLOR_DEPTH_141414:
+	case COLOR_DEPTH_161616:
+	default:
+		/* not a valid use case */
+		break;
+	}
+
+	REG_SET(DENORM_CONTROL, 0, DENORM_MODE, denorm_mode);
+}
+
+static void dce_transform_set_pixel_storage_depth(
+	struct transform *xfm,
+	enum lb_pixel_depth depth,
+	const struct bit_depth_reduction_params *bit_depth_params)
+{
+	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+	int pixel_depth, expan_mode;
+	enum dc_color_depth color_depth;
+
+	switch (depth) {
+	case LB_PIXEL_DEPTH_18BPP:
+		color_depth = COLOR_DEPTH_666;
+		pixel_depth = 2;
+		expan_mode  = 1;
+		break;
+	case LB_PIXEL_DEPTH_24BPP:
+		color_depth = COLOR_DEPTH_888;
+		pixel_depth = 1;
+		expan_mode  = 1;
+		break;
+	case LB_PIXEL_DEPTH_30BPP:
+		color_depth = COLOR_DEPTH_101010;
+		pixel_depth = 0;
+		expan_mode  = 1;
+		break;
+	case LB_PIXEL_DEPTH_36BPP:
+		color_depth = COLOR_DEPTH_121212;
+		pixel_depth = 3;
+		expan_mode  = 0;
+		break;
+	default:
+		color_depth = COLOR_DEPTH_101010;
+		pixel_depth = 0;
+		expan_mode  = 1;
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+
+	set_denormalization(xfm_dce, color_depth);
+	program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params);
+
+	REG_UPDATE_2(LB_DATA_FORMAT,
+			PIXEL_DEPTH, pixel_depth,
+			PIXEL_EXPAN_MODE, expan_mode);
+
+	if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+		/* We shouldn't be using unsupported capabilities
+		 * unless it is required by a workaround */
+		dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+			"%s: Capability not supported",
+			__func__);
+	}
+}
+
+static void program_gamut_remap(
+	struct dce_transform *xfm_dce,
+	const uint16_t *reg_val)
+{
+	if (reg_val) {
+		REG_SET_2(GAMUT_REMAP_C11_C12, 0,
+				GAMUT_REMAP_C11, reg_val[0],
+				GAMUT_REMAP_C12, reg_val[1]);
+		REG_SET_2(GAMUT_REMAP_C13_C14, 0,
+				GAMUT_REMAP_C13, reg_val[2],
+				GAMUT_REMAP_C14, reg_val[3]);
+		REG_SET_2(GAMUT_REMAP_C21_C22, 0,
+				GAMUT_REMAP_C21, reg_val[4],
+				GAMUT_REMAP_C22, reg_val[5]);
+		REG_SET_2(GAMUT_REMAP_C23_C24, 0,
+				GAMUT_REMAP_C23, reg_val[6],
+				GAMUT_REMAP_C24, reg_val[7]);
+		REG_SET_2(GAMUT_REMAP_C31_C32, 0,
+				GAMUT_REMAP_C31, reg_val[8],
+				GAMUT_REMAP_C32, reg_val[9]);
+		REG_SET_2(GAMUT_REMAP_C33_C34, 0,
+				GAMUT_REMAP_C33, reg_val[10],
+				GAMUT_REMAP_C34, reg_val[11]);
+
+		REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 1);
+	} else {
+		REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 0);
+	}
+
+}
+
+/**
+ *****************************************************************************
+ *  Function: dal_transform_wide_gamut_set_gamut_remap
+ *
+ *  @param [in] const struct xfm_grph_csc_adjustment *adjust
+ *
+ *  @return
+ *     void
+ *
+ *  @note Calculate and apply color temperature adjustment in RGB color space
+ *
+ *  @see
+ *
+ *****************************************************************************
+ */
+static void dce_transform_set_gamut_remap(
+	struct transform *xfm,
+	const struct xfm_grph_csc_adjustment *adjust)
+{
+	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
+		/* Bypass if type is bypass or hw */
+		program_gamut_remap(xfm_dce, NULL);
+	} else {
+		struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
+		uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
+
+		arr_matrix[0] = adjust->temperature_matrix[0];
+		arr_matrix[1] = adjust->temperature_matrix[1];
+		arr_matrix[2] = adjust->temperature_matrix[2];
+		arr_matrix[3] = dal_fixed31_32_zero;
+
+		arr_matrix[4] = adjust->temperature_matrix[3];
+		arr_matrix[5] = adjust->temperature_matrix[4];
+		arr_matrix[6] = adjust->temperature_matrix[5];
+		arr_matrix[7] = dal_fixed31_32_zero;
+
+		arr_matrix[8] = adjust->temperature_matrix[6];
+		arr_matrix[9] = adjust->temperature_matrix[7];
+		arr_matrix[10] = adjust->temperature_matrix[8];
+		arr_matrix[11] = dal_fixed31_32_zero;
+
+		convert_float_matrix(
+			arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
+
+		program_gamut_remap(xfm_dce, arr_reg_val);
+	}
+}
+
+static uint32_t decide_taps(struct fixed31_32 ratio, uint32_t in_taps, bool chroma)
+{
+	uint32_t taps;
+
+	if (IDENTITY_RATIO(ratio)) {
+		return 1;
+	} else if (in_taps != 0) {
+		taps = in_taps;
+	} else {
+		taps = 4;
+	}
+
+	if (chroma) {
+		taps /= 2;
+		if (taps < 2)
+			taps = 2;
+	}
+
+	return taps;
+}
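+
+/* Examples (illustrative): an identity ratio always yields 1 tap (no
+ * filtering); otherwise the caller-provided in_taps wins, defaulting to 4.
+ * Chroma gets half the luma tap count clamped to a minimum of 2, e.g.
+ * 4 luma taps -> 2 chroma taps.
+ */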
+
+
+bool dce_transform_get_optimal_number_of_taps(
+	struct transform *xfm,
+	struct scaler_data *scl_data,
+	const struct scaling_taps *in_taps)
+{
+	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+	int pixel_width = scl_data->viewport.width;
+	int max_num_of_lines;
+
+	if (xfm_dce->prescaler_on &&
+			(scl_data->viewport.width > scl_data->recout.width))
+		pixel_width = scl_data->recout.width;
+
+	max_num_of_lines = dce_transform_get_max_num_of_supported_lines(
+		xfm_dce,
+		scl_data->lb_params.depth,
+		pixel_width);
+
+	/* Fail if in_taps are impossible */
+	if (in_taps->v_taps >= max_num_of_lines)
+		return false;
+
+	/*
+	 * Set taps according to this policy (in this order)
+	 * - Use 1 for no scaling
+	 * - Use input taps
+	 * - Use 4 and reduce as required by line buffer size
+	 * - Decide chroma taps if chroma is scaled
+	 *
+	 * Ignore input chroma taps. Decide based on non-chroma
+	 */
+	scl_data->taps.h_taps = decide_taps(scl_data->ratios.horz, in_taps->h_taps, false);
+	scl_data->taps.v_taps = decide_taps(scl_data->ratios.vert, in_taps->v_taps, false);
+	scl_data->taps.h_taps_c = decide_taps(scl_data->ratios.horz_c, in_taps->h_taps, true);
+	scl_data->taps.v_taps_c = decide_taps(scl_data->ratios.vert_c, in_taps->v_taps, true);
+
+	if (!IDENTITY_RATIO(scl_data->ratios.vert)) {
+		/* reduce v_taps if needed but ensure we have at least two */
+		if (in_taps->v_taps == 0
+				&& max_num_of_lines <= scl_data->taps.v_taps
+				&& scl_data->taps.v_taps > 1) {
+			scl_data->taps.v_taps = max_num_of_lines - 1;
+		}
+
+		if (scl_data->taps.v_taps <= 1)
+			return false;
+	}
+
+	if (!IDENTITY_RATIO(scl_data->ratios.vert_c)) {
+		/* reduce chroma v_taps if needed but ensure we have at least two */
+		if (max_num_of_lines <= scl_data->taps.v_taps_c && scl_data->taps.v_taps_c > 1) {
+			scl_data->taps.v_taps_c = max_num_of_lines - 1;
+		}
+
+		if (scl_data->taps.v_taps_c <= 1)
+			return false;
+	}
+
+	/* we've got valid taps */
+	return true;
+}
+
+static void dce_transform_reset(struct transform *xfm)
+{
+	struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+	xfm_dce->filter_h = NULL;
+	xfm_dce->filter_v = NULL;
+}
+
+
+static const struct transform_funcs dce_transform_funcs = {
+	.transform_reset = dce_transform_reset,
+	.transform_set_scaler =
+		dce_transform_set_scaler,
+	.transform_set_gamut_remap =
+		dce_transform_set_gamut_remap,
+	.transform_set_pixel_storage_depth =
+		dce_transform_set_pixel_storage_depth,
+	.transform_get_optimal_number_of_taps =
+		dce_transform_get_optimal_number_of_taps
+};
+
+/*****************************************/
+/* Constructor, Destructor               */
+/*****************************************/
+
+bool dce_transform_construct(
+	struct dce_transform *xfm_dce,
+	struct dc_context *ctx,
+	uint32_t inst,
+	const struct dce_transform_registers *regs,
+	const struct dce_transform_shift *xfm_shift,
+	const struct dce_transform_mask *xfm_mask)
+{
+	xfm_dce->base.ctx = ctx;
+
+	xfm_dce->base.inst = inst;
+	xfm_dce->base.funcs = &dce_transform_funcs;
+
+	xfm_dce->regs = regs;
+	xfm_dce->xfm_shift = xfm_shift;
+	xfm_dce->xfm_mask = xfm_mask;
+
+	xfm_dce->prescaler_on = true;
+	xfm_dce->lb_pixel_depth_supported =
+			LB_PIXEL_DEPTH_18BPP |
+			LB_PIXEL_DEPTH_24BPP |
+			LB_PIXEL_DEPTH_30BPP;
+
+	xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+	xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+
+	return true;
+}

+ 313 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_transform.h

@@ -0,0 +1,313 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_DCE_TRANSFORM_H_
+#define _DCE_DCE_TRANSFORM_H_
+
+
+#include "transform.h"
+
+#define TO_DCE_TRANSFORM(transform)\
+	container_of(transform, struct dce_transform, base)
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+#define XFM_COMMON_REG_LIST_DCE_BASE(id) \
+	SRI(LB_DATA_FORMAT, LB, id), \
+	SRI(GAMUT_REMAP_CONTROL, DCP, id), \
+	SRI(GAMUT_REMAP_C11_C12, DCP, id), \
+	SRI(GAMUT_REMAP_C13_C14, DCP, id), \
+	SRI(GAMUT_REMAP_C21_C22, DCP, id), \
+	SRI(GAMUT_REMAP_C23_C24, DCP, id), \
+	SRI(GAMUT_REMAP_C31_C32, DCP, id), \
+	SRI(GAMUT_REMAP_C33_C34, DCP, id), \
+	SRI(DENORM_CONTROL, DCP, id), \
+	SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \
+	SRI(OUT_ROUND_CONTROL, DCP, id), \
+	SRI(OUT_CLAMP_CONTROL_R_CR, DCP, id), \
+	SRI(OUT_CLAMP_CONTROL_G_Y, DCP, id), \
+	SRI(OUT_CLAMP_CONTROL_B_CB, DCP, id), \
+	SRI(SCL_MODE, SCL, id), \
+	SRI(SCL_TAP_CONTROL, SCL, id), \
+	SRI(SCL_CONTROL, SCL, id), \
+	SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \
+	SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \
+	SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \
+	SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \
+	SRI(SCL_COEF_RAM_SELECT, SCL, id), \
+	SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+	SRI(VIEWPORT_START, SCL, id), \
+	SRI(VIEWPORT_SIZE, SCL, id), \
+	SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+	SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+	SRI(SCL_HORZ_FILTER_INIT, SCL, id), \
+	SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+	SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \
+	SRI(LB_MEMORY_CTRL, LB, id), \
+	SRI(SCL_UPDATE, SCL, id)
+
+#define XFM_COMMON_REG_LIST_DCE100(id) \
+	XFM_COMMON_REG_LIST_DCE_BASE(id), \
+	SRI(DCFE_MEM_PWR_CTRL, CRTC, id), \
+	SRI(DCFE_MEM_PWR_STATUS, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE110(id) \
+	XFM_COMMON_REG_LIST_DCE_BASE(id), \
+	SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \
+	SRI(DCFE_MEM_PWR_STATUS, DCFE, id)
+
+#define XFM_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
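+
+/* For example (illustrative), XFM_SF(SCL_MODE, SCL_MODE, __SHIFT) expands to
+ * .SCL_MODE = SCL_MODE__SCL_MODE__SHIFT, so the same field list below can be
+ * instantiated once for shifts and once for masks.
+ */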
+
+#define XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+	XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+	XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+	XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+	XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+	XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+	XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+	XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+	XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+	XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+	XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+	XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+	XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+	XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+	XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \
+	XFM_SF(LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+	XFM_SF(LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+	XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+	XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+	XFM_SF(SCL_MODE, SCL_MODE, mask_sh), \
+	XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+	XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+	XFM_SF(SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+	XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+	XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+	XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+	XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+	XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+	XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+	XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+	XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+	XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+	XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+	XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+	XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+	XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+	XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+	XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+	XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+	XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+	XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+	XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+	XFM_SF(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+	XFM_SF(LB_DATA_FORMAT, ALPHA_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE110(mask_sh) \
+	XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+	XFM_SF(DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+	XFM_SF(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+	XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_REG_FIELD_LIST(type) \
+	type OUT_CLAMP_MIN_B_CB; \
+	type OUT_CLAMP_MAX_B_CB; \
+	type OUT_CLAMP_MIN_G_Y; \
+	type OUT_CLAMP_MAX_G_Y; \
+	type OUT_CLAMP_MIN_R_CR; \
+	type OUT_CLAMP_MAX_R_CR; \
+	type OUT_ROUND_TRUNC_MODE; \
+	type DCP_SPATIAL_DITHER_EN; \
+	type DCP_SPATIAL_DITHER_MODE; \
+	type DCP_SPATIAL_DITHER_DEPTH; \
+	type DCP_FRAME_RANDOM_ENABLE; \
+	type DCP_RGB_RANDOM_ENABLE; \
+	type DCP_HIGHPASS_RANDOM_ENABLE; \
+	type DENORM_MODE; \
+	type PIXEL_DEPTH; \
+	type PIXEL_EXPAN_MODE; \
+	type GAMUT_REMAP_C11; \
+	type GAMUT_REMAP_C12; \
+	type GAMUT_REMAP_C13; \
+	type GAMUT_REMAP_C14; \
+	type GAMUT_REMAP_C21; \
+	type GAMUT_REMAP_C22; \
+	type GAMUT_REMAP_C23; \
+	type GAMUT_REMAP_C24; \
+	type GAMUT_REMAP_C31; \
+	type GAMUT_REMAP_C32; \
+	type GAMUT_REMAP_C33; \
+	type GAMUT_REMAP_C34; \
+	type GRPH_GAMUT_REMAP_MODE; \
+	type SCL_MODE; \
+	type SCL_PSCL_EN; \
+	type SCL_H_NUM_OF_TAPS; \
+	type SCL_V_NUM_OF_TAPS; \
+	type SCL_BOUNDARY_MODE; \
+	type EXT_OVERSCAN_LEFT; \
+	type EXT_OVERSCAN_RIGHT; \
+	type EXT_OVERSCAN_TOP; \
+	type EXT_OVERSCAN_BOTTOM; \
+	type SCL_COEFF_MEM_PWR_DIS; \
+	type SCL_COEFF_MEM_PWR_STATE; \
+	type SCL_C_RAM_FILTER_TYPE; \
+	type SCL_C_RAM_PHASE; \
+	type SCL_C_RAM_TAP_PAIR_IDX; \
+	type SCL_C_RAM_EVEN_TAP_COEF_EN; \
+	type SCL_C_RAM_EVEN_TAP_COEF; \
+	type SCL_C_RAM_ODD_TAP_COEF_EN; \
+	type SCL_C_RAM_ODD_TAP_COEF; \
+	type VIEWPORT_X_START; \
+	type VIEWPORT_Y_START; \
+	type VIEWPORT_HEIGHT; \
+	type VIEWPORT_WIDTH; \
+	type SCL_H_SCALE_RATIO; \
+	type SCL_V_SCALE_RATIO; \
+	type SCL_H_INIT_INT; \
+	type SCL_H_INIT_FRAC; \
+	type SCL_V_INIT_INT; \
+	type SCL_V_INIT_FRAC; \
+	type LB_MEMORY_CONFIG; \
+	type LB_MEMORY_SIZE; \
+	type SCL_V_2TAP_HARDCODE_COEF_EN; \
+	type SCL_H_2TAP_HARDCODE_COEF_EN; \
+	type SCL_COEF_UPDATE_COMPLETE; \
+	type ALPHA_EN
+
+struct dce_transform_shift {
+	XFM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_transform_mask {
+	XFM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_transform_registers {
+	uint32_t LB_DATA_FORMAT;
+	uint32_t GAMUT_REMAP_CONTROL;
+	uint32_t GAMUT_REMAP_C11_C12;
+	uint32_t GAMUT_REMAP_C13_C14;
+	uint32_t GAMUT_REMAP_C21_C22;
+	uint32_t GAMUT_REMAP_C23_C24;
+	uint32_t GAMUT_REMAP_C31_C32;
+	uint32_t GAMUT_REMAP_C33_C34;
+	uint32_t DENORM_CONTROL;
+	uint32_t DCP_SPATIAL_DITHER_CNTL;
+	uint32_t OUT_ROUND_CONTROL;
+	uint32_t OUT_CLAMP_CONTROL_R_CR;
+	uint32_t OUT_CLAMP_CONTROL_G_Y;
+	uint32_t OUT_CLAMP_CONTROL_B_CB;
+	uint32_t SCL_MODE;
+	uint32_t SCL_TAP_CONTROL;
+	uint32_t SCL_CONTROL;
+	uint32_t EXT_OVERSCAN_LEFT_RIGHT;
+	uint32_t EXT_OVERSCAN_TOP_BOTTOM;
+	uint32_t SCL_VERT_FILTER_CONTROL;
+	uint32_t SCL_HORZ_FILTER_CONTROL;
+	uint32_t DCFE_MEM_PWR_CTRL;
+	uint32_t DCFE_MEM_PWR_STATUS;
+	uint32_t SCL_COEF_RAM_SELECT;
+	uint32_t SCL_COEF_RAM_TAP_DATA;
+	uint32_t VIEWPORT_START;
+	uint32_t VIEWPORT_SIZE;
+	uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+	uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+	uint32_t SCL_HORZ_FILTER_INIT;
+	uint32_t SCL_VERT_FILTER_INIT;
+	uint32_t SCL_AUTOMATIC_MODE_CONTROL;
+	uint32_t LB_MEMORY_CTRL;
+	uint32_t SCL_UPDATE;
+};
+
+struct init_int_and_frac {
+	uint32_t integer;
+	uint32_t fraction;
+};
+
+struct scl_ratios_inits {
+	uint32_t h_int_scale_ratio;
+	uint32_t v_int_scale_ratio;
+	struct init_int_and_frac h_init;
+	struct init_int_and_frac v_init;
+};
+
+enum ram_filter_type {
+	FILTER_TYPE_RGB_Y_VERTICAL	= 0, /* 0 - RGB/Y Vertical filter */
+	FILTER_TYPE_CBCR_VERTICAL	= 1, /* 1 - CbCr  Vertical filter */
+	FILTER_TYPE_RGB_Y_HORIZONTAL	= 2, /* 2 - RGB/Y Horizontal filter */
+	FILTER_TYPE_CBCR_HORIZONTAL	= 3, /* 3 - CbCr  Horizontal filter */
+	FILTER_TYPE_ALPHA_VERTICAL	= 4, /* 4 - Alpha Vertical filter. */
+	FILTER_TYPE_ALPHA_HORIZONTAL	= 5, /* 5 - Alpha Horizontal filter. */
+};
+
+struct dce_transform {
+	struct transform base;
+	const struct dce_transform_registers *regs;
+	const struct dce_transform_shift *xfm_shift;
+	const struct dce_transform_mask *xfm_mask;
+
+	const uint16_t *filter_v;
+	const uint16_t *filter_h;
+	const uint16_t *filter_v_c;
+	const uint16_t *filter_h_c;
+	int lb_pixel_depth_supported;
+	int lb_memory_size;
+	int lb_bits_per_entry;
+	bool prescaler_on;
+};
+
+bool dce_transform_construct(struct dce_transform *xfm110,
+	struct dc_context *ctx,
+	uint32_t inst,
+	const struct dce_transform_registers *regs,
+	const struct dce_transform_shift *xfm_shift,
+	const struct dce_transform_mask *xfm_mask);
+
+bool dce_transform_get_optimal_number_of_taps(
+	struct transform *xfm,
+	struct scaler_data *scl_data,
+	const struct scaling_taps *in_taps);
+
+
+#endif /* _DCE_DCE_TRANSFORM_H_ */

+ 23 - 0
drivers/gpu/drm/amd/display/dc/dce100/Makefile

@@ -0,0 +1,23 @@
+#
+# Makefile for the 'dce100' sub-component of DAL.
+# It provides the control and status of the DCE 10.x HW blocks.
+
+DCE100 = dce100_resource.o dce100_hw_sequencer.o
+
+AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
+
+
+###############################################################################
+# DCE 10x
+###############################################################################
+ifdef 0 # CONFIG_DRM_AMD_DC_DCE11_0
+TG_DCE100 = dce100_resource.o
+
+AMD_DAL_TG_DCE100 = $(addprefix \
+	$(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
+endif
+

+ 140 - 0
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c

@@ -0,0 +1,140 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "core_dc.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "dce100_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE10 register header files */
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+struct dce100_hw_seq_reg_offsets {
+	uint32_t blnd;
+	uint32_t crtc;
+};
+
+static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
+{
+	.crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+	.crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+	.crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+	.crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+	.crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+	.crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+	(reg + reg_offsets[id].crtc)
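+
+/* For example (illustrative), HW_REG_CRTC(mmMASTER_UPDATE_MODE, 1) resolves
+ * to mmMASTER_UPDATE_MODE plus the CRTC1 block offset taken from
+ * reg_offsets[1].crtc above.
+ */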
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
+
+static bool dce100_enable_display_power_gating(
+	struct core_dc *dc,
+	uint8_t controller_id,
+	struct dc_bios *dcb,
+	enum pipe_gating_control power_gating)
+{
+	enum bp_result bp_result = BP_RESULT_OK;
+	enum bp_pipe_control_action cntl;
+	struct dc_context *ctx = dc->ctx;
+
+	if (power_gating == PIPE_GATING_CONTROL_INIT)
+		cntl = ASIC_PIPE_INIT;
+	else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+		cntl = ASIC_PIPE_ENABLE;
+	else
+		cntl = ASIC_PIPE_DISABLE;
+
+	if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)) {
+		bp_result = dcb->funcs->enable_disp_power_gating(
+						dcb, controller_id + 1, cntl);
+
+		/* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+		 * by default when command table is called
+		 */
+		dm_write_reg(ctx,
+			HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+			0);
+	}
+
+	return bp_result == BP_RESULT_OK;
+}
+
+static void set_display_mark_for_pipe_if_needed(struct core_dc *dc,
+		struct pipe_ctx *pipe_ctx,
+		struct validate_context *context)
+{
+	/* Do nothing until we have proper bandwidth calcs */
+}
+
+static void set_displaymarks(
+		const struct core_dc *dc, struct validate_context *context)
+{
+	/* Do nothing until we have proper bandwidth calcs */
+}
+
+static void set_bandwidth(struct core_dc *dc)
+{
+	/* Do nothing until we have proper bandwidth calcs */
+}
+
+
+/**************************************************************************/
+
+bool dce100_hw_sequencer_construct(struct core_dc *dc)
+{
+	dce110_hw_sequencer_construct(dc);
+
+	/* TODO: these are empty implementations at the moment */
+	dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+	dc->hwss.set_displaymarks = set_displaymarks;
+	dc->hwss.increase_watermarks_for_pipe = set_display_mark_for_pipe_if_needed;
+	dc->hwss.set_bandwidth = set_bandwidth;
+
+	return true;
+}
+

+ 36 - 0
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h

@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE100_H__
+#define __DC_HWSS_DCE100_H__
+
+#include "core_types.h"
+
+struct core_dc;
+
+bool dce100_hw_sequencer_construct(struct core_dc *dc);
+
+#endif /* __DC_HWSS_DCE100_H__ */
+

+ 1085 - 0
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c

@@ -0,0 +1,1085 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce110/dce110_mem_input.h"
+#include "dce110/dce110_mem_input_v.h"
+#include "dce110/dce110_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce110/dce110_opp.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+	#define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+	#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+	#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+	#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+	#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+	#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+	#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+	#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+	#define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+	#define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+	#define mmBIOS_SCRATCH_2 0x05CB
+	#define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+	#define mmDP_DPHY_BS_SR_SWAP_CNTL                       0x4ADC
+	#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL                   0x4ADC
+	#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL                   0x4BDC
+	#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL                   0x4CDC
+	#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL                   0x4DDC
+	#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL                   0x4EDC
+	#define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL                   0x4FDC
+	#define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL                   0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+	#define mmDP_DPHY_FAST_TRAINING                         0x4ABC
+	#define mmDP0_DP_DPHY_FAST_TRAINING                     0x4ABC
+	#define mmDP1_DP_DPHY_FAST_TRAINING                     0x4BBC
+	#define mmDP2_DP_DPHY_FAST_TRAINING                     0x4CBC
+	#define mmDP3_DP_DPHY_FAST_TRAINING                     0x4DBC
+	#define mmDP4_DP_DPHY_FAST_TRAINING                     0x4EBC
+	#define mmDP5_DP_DPHY_FAST_TRAINING                     0x4FBC
+	#define mmDP6_DP_DPHY_FAST_TRAINING                     0x54BC
+#endif
+
+static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+	{
+		.crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+		.dcp =  (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+		.dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+		.dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+		.dcp =  (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+		.dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+		.dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+	}
+};
+
+static const struct dce110_mem_input_reg_offsets dce100_mi_reg_offsets[] = {
+	{
+		.dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+		.dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
+				- mmDPG_WATERMARK_MASK_CONTROL),
+		.pipe = (mmPIPE0_DMIF_BUFFER_CONTROL
+				- mmPIPE0_DMIF_BUFFER_CONTROL),
+	},
+	{
+		.dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+		.dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
+				- mmDPG_WATERMARK_MASK_CONTROL),
+		.pipe = (mmPIPE1_DMIF_BUFFER_CONTROL
+				- mmPIPE0_DMIF_BUFFER_CONTROL),
+	},
+	{
+		.dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+		.dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
+				- mmDPG_WATERMARK_MASK_CONTROL),
+		.pipe = (mmPIPE2_DMIF_BUFFER_CONTROL
+				- mmPIPE0_DMIF_BUFFER_CONTROL),
+	},
+	{
+		.dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+		.dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
+				- mmDPG_WATERMARK_MASK_CONTROL),
+		.pipe = (mmPIPE3_DMIF_BUFFER_CONTROL
+				- mmPIPE0_DMIF_BUFFER_CONTROL),
+	},
+	{
+		.dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+		.dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
+				- mmDPG_WATERMARK_MASK_CONTROL),
+		.pipe = (mmPIPE4_DMIF_BUFFER_CONTROL
+				- mmPIPE0_DMIF_BUFFER_CONTROL),
+	},
+	{
+		.dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+		.dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
+				- mmDPG_WATERMARK_MASK_CONTROL),
+		.pipe = (mmPIPE5_DMIF_BUFFER_CONTROL
+				- mmPIPE0_DMIF_BUFFER_CONTROL),
+	}
+};
+
+
+static const struct dce110_ipp_reg_offsets dce100_ipp_reg_offsets[] = {
+{
+	.dcp_offset = (mmDCP0_CUR_CONTROL - mmCUR_CONTROL),
+},
+{
+	.dcp_offset = (mmDCP1_CUR_CONTROL - mmCUR_CONTROL),
+},
+{
+	.dcp_offset = (mmDCP2_CUR_CONTROL - mmCUR_CONTROL),
+},
+{
+	.dcp_offset = (mmDCP3_CUR_CONTROL - mmCUR_CONTROL),
+},
+{
+	.dcp_offset = (mmDCP4_CUR_CONTROL - mmCUR_CONTROL),
+},
+{
+	.dcp_offset = (mmDCP5_CUR_CONTROL - mmCUR_CONTROL),
+}
+};
+
+
+
+/* set register offset */
+#define SR(reg_name)\
+	.reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+	.reg_name = mm ## block ## id ## _ ## reg_name
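+
+/* For example (illustrative), SRI(SCL_MODE, SCL, 0) expands to
+ * .SCL_MODE = mmSCL0_SCL_MODE, which is how the per-instance register
+ * tables below are populated from a single field list.
+ */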
+
+
+#define transform_regs(id)\
+[id] = {\
+		XFM_COMMON_REG_LIST_DCE100(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+		transform_regs(0),
+		transform_regs(1),
+		transform_regs(2),
+		transform_regs(3),
+		transform_regs(4),
+		transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+		XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+		XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+	AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+		aux_regs(0),
+		aux_regs(1),
+		aux_regs(2),
+		aux_regs(3),
+		aux_regs(4),
+		aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+	HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+		hpd_regs(0),
+		hpd_regs(1),
+		hpd_regs(2),
+		hpd_regs(3),
+		hpd_regs(4),
+		hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+	LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+	link_regs(0),
+	link_regs(1),
+	link_regs(2),
+	link_regs(3),
+	link_regs(4),
+	link_regs(5),
+	link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+	SE_COMMON_REG_LIST_DCE_BASE(id),\
+	.AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+	stream_enc_regs(0),
+	stream_enc_regs(1),
+	stream_enc_regs(2),
+	stream_enc_regs(3),
+	stream_enc_regs(4),
+	stream_enc_regs(5),
+	stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+		SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+		SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+	AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+	audio_regs(0),
+	audio_regs(1),
+	audio_regs(2),
+	audio_regs(3),
+	audio_regs(4),
+	audio_regs(5),
+	audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+		AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+		AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+	CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+	clk_src_regs(0),
+	clk_src_regs(1),
+	clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+		CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+		CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+
+
+#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
+
+static const struct dce110_opp_reg_offsets dce100_opp_reg_offsets[] = {
+{
+	.fmt_offset = (mmFMT0_FMT_CONTROL - mmFMT_CONTROL),
+	.dcfe_offset = (mmCRTC0_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
+	.dcp_offset = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+},
+{
+	.fmt_offset = (mmFMT1_FMT_CONTROL - mmFMT0_FMT_CONTROL),
+	.dcfe_offset = (mmCRTC1_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
+	.dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{	.fmt_offset = (mmFMT2_FMT_CONTROL - mmFMT0_FMT_CONTROL),
+	.dcfe_offset = (mmCRTC2_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
+	.dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+	.fmt_offset = (mmFMT3_FMT_CONTROL - mmFMT0_FMT_CONTROL),
+	.dcfe_offset = (mmCRTC3_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
+	.dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+	.fmt_offset = (mmFMT4_FMT_CONTROL - mmFMT0_FMT_CONTROL),
+	.dcfe_offset = (mmCRTC4_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
+	.dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+	.fmt_offset = (mmFMT5_FMT_CONTROL - mmFMT0_FMT_CONTROL),
+	.dcfe_offset = (mmCRTC5_DCFE_MEM_PWR_CTRL - DCFE_MEM_PWR_CTRL_REG_BASE),
+	.dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+}
+};
+
+static const struct bios_registers bios_regs = {
+	.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+	.num_timing_generator = 6,
+	.num_audio = 6,
+	.num_stream_encoder = 6,
+	.num_pll = 3
+};
+
+#define CTX  ctx
+#define REG(reg) mm ## reg
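+/* CTX and REG() feed the generic REG_GET*() helpers used in this file:
+ * CTX names the local dc_context and REG() resolves a bare register name
+ * to its mm-prefixed offset from the ASIC headers.
+ */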
+
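+/* Fallback strap definitions for register headers that do not expose
+ * CC_DC_HDMI_STRAPS; the hard-coded offset and masks are assumed to match
+ * the DCE10 register spec.
+ */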
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
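+/* REG_GET_2() performs a single register read and extracts both fields via
+ * the __SHIFT/_MASK pairs defined above -- roughly (a sketch, not the
+ * literal macro expansion):
+ *
+ *   uint32_t v = dm_read_reg(ctx, REG(CC_DC_HDMI_STRAPS));
+ *   straps->hdmi_disable = (v & CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK)
+ *           >> CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT;
+ */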
+static void read_dce_straps(
+	struct dc_context *ctx,
+	struct resource_straps *straps)
+{
+	REG_GET_2(CC_DC_HDMI_STRAPS,
+			HDMI_DISABLE, &straps->hdmi_disable,
+			AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+	REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+		struct dc_context *ctx, unsigned int inst)
+{
+	return dce_audio_create(ctx, inst,
+			&audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce100_timing_generator_create(
+		struct dc_context *ctx,
+		uint32_t instance,
+		const struct dce110_timing_generator_offsets *offsets)
+{
+	struct dce110_timing_generator *tg110 =
+		dm_alloc(sizeof(struct dce110_timing_generator));
+
+	if (!tg110)
+		return NULL;
+
+	if (dce110_timing_generator_construct(tg110, ctx, instance,
+			offsets))
+		return &tg110->base;
+
+	BREAK_TO_DEBUGGER();
+	dm_free(tg110);
+	return NULL;
+}
+
+static struct stream_encoder *dce100_stream_encoder_create(
+	enum engine_id eng_id,
+	struct dc_context *ctx)
+{
+	struct dce110_stream_encoder *enc110 =
+		dm_alloc(sizeof(struct dce110_stream_encoder));
+
+	if (!enc110)
+		return NULL;
+
+	if (dce110_stream_encoder_construct(
+			enc110, ctx, ctx->dc_bios, eng_id,
+			&stream_enc_regs[eng_id], &se_shift, &se_mask))
+		return &enc110->base;
+
+	BREAK_TO_DEBUGGER();
+	dm_free(enc110);
+	return NULL;
+}
+
+#define SRII(reg_name, block, id)\
+	.reg_name[id] = mm ## block ## id ## _ ## reg_name
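+/* SRII() fills one slot of a register array from an instanced register
+ * name, i.e. .reg_name[id] = mm<block><id>_<reg_name>, for the
+ * HWSEQ_DCE10_REG_LIST() expansion below.
+ */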
+
+static const struct dce_hwseq_registers hwseq_reg = {
+		HWSEQ_DCE10_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+		HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+		HWSEQ_DCE10_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce100_hwseq_create(
+	struct dc_context *ctx)
+{
+	struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));
+
+	if (hws) {
+		hws->ctx = ctx;
+		hws->regs = &hwseq_reg;
+		hws->shifts = &hwseq_shift;
+		hws->masks = &hwseq_mask;
+	}
+	return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+	.read_dce_straps = read_dce_straps,
+	.create_audio = create_audio,
+	.create_stream_encoder = dce100_stream_encoder_create,
+	.create_hwseq = dce100_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+	MI_REG_LIST(id), \
+	.MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
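+/* Every instance entry also carries the single, non-instanced
+ * MC_HUB_RDREQ_DMIF_LIMIT register, so all memory inputs share one MC hub
+ * read-request limit.
+ */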
+static const struct dce_mem_input_registers mi_regs[] = {
+		mi_inst_regs(0),
+		mi_inst_regs(1),
+		mi_inst_regs(2),
+		mi_inst_regs(3),
+		mi_inst_regs(4),
+		mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+		MI_DCE_MASK_SH_LIST(__SHIFT),
+		.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+		MI_DCE_MASK_SH_LIST(_MASK),
+		.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce100_mem_input_create(
+	struct dc_context *ctx,
+	uint32_t inst,
+	const struct dce110_mem_input_reg_offsets *offset)
+{
+	struct dce110_mem_input *mem_input110 =
+		dm_alloc(sizeof(struct dce110_mem_input));
+
+	if (!mem_input110)
+		return NULL;
+
+	if (dce110_mem_input_construct(mem_input110, ctx, inst, offset)) {
+		struct mem_input *mi = &mem_input110->base;
+
+		mi->regs = &mi_regs[inst];
+		mi->shifts = &mi_shifts;
+		mi->masks = &mi_masks;
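+		/* workaround: cap DMIF read requests when a single head is
+		 * active; the value 2 is assumed to come from HW tuning
+		 */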
+		mi->wa.single_head_rdreq_dmif_limit = 2;
+		return mi;
+	}
+
+	BREAK_TO_DEBUGGER();
+	dm_free(mem_input110);
+	return NULL;
+}
+
+static void dce100_transform_destroy(struct transform **xfm)
+{
+	dm_free(TO_DCE_TRANSFORM(*xfm));
+	*xfm = NULL;
+}
+
+static struct transform *dce100_transform_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct dce_transform *transform =
+		dm_alloc(sizeof(struct dce_transform));
+
+	if (!transform)
+		return NULL;
+
+	if (dce_transform_construct(transform, ctx, inst,
+			&xfm_regs[inst], &xfm_shift, &xfm_mask)) {
+		return &transform->base;
+	}
+
+	BREAK_TO_DEBUGGER();
+	dm_free(transform);
+	return NULL;
+}
+
+static struct input_pixel_processor *dce100_ipp_create(
+	struct dc_context *ctx,
+	uint32_t inst,
+	const struct dce110_ipp_reg_offsets *offsets)
+{
+	struct dce110_ipp *ipp =
+		dm_alloc(sizeof(struct dce110_ipp));
+
+	if (!ipp)
+		return NULL;
+
+	if (dce110_ipp_construct(ipp, ctx, inst, offsets))
+		return &ipp->base;
+
+	BREAK_TO_DEBUGGER();
+	dm_free(ipp);
+	return NULL;
+}
+
+struct link_encoder *dce100_link_encoder_create(
+	const struct encoder_init_data *enc_init_data)
+{
+	struct dce110_link_encoder *enc110 =
+		dm_alloc(sizeof(struct dce110_link_encoder));
+
+	if (!enc110)
+		return NULL;
+
+	if (dce110_link_encoder_construct(
+			enc110,
+			enc_init_data,
+			&link_enc_regs[enc_init_data->transmitter],
+			&link_enc_aux_regs[enc_init_data->channel - 1],
+			&link_enc_hpd_regs[enc_init_data->hpd_source])) {
+
+		enc110->base.features.ycbcr420_supported = false;
+		enc110->base.features.max_hdmi_pixel_clock = 300000;
+		return &enc110->base;
+	}
+
+	BREAK_TO_DEBUGGER();
+	dm_free(enc110);
+	return NULL;
+}
+
+struct output_pixel_processor *dce100_opp_create(
+	struct dc_context *ctx,
+	uint32_t inst,
+	const struct dce110_opp_reg_offsets *offset)
+{
+	struct dce110_opp *opp =
+		dm_alloc(sizeof(struct dce110_opp));
+
+	if (!opp)
+		return NULL;
+
+	if (dce110_opp_construct(opp,
+			ctx, inst, offset))
+		return &opp->base;
+
+	BREAK_TO_DEBUGGER();
+	dm_free(opp);
+	return NULL;
+}
+
+void dce100_opp_destroy(struct output_pixel_processor **opp)
+{
+	struct dce110_opp *dce110_opp;
+
+	if (!opp || !*opp)
+		return;
+
+	dce110_opp = FROM_DCE11_OPP(*opp);
+
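+	/* free the regamma scratch/LUT buffers, presumably allocated by
+	 * dce110_opp_construct()
+	 */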
+	dm_free(dce110_opp->regamma.coeff128_dx);
+	dm_free(dce110_opp->regamma.coeff128_oem);
+	dm_free(dce110_opp->regamma.coeff128);
+	dm_free(dce110_opp->regamma.axis_x_1025);
+	dm_free(dce110_opp->regamma.axis_x_256);
+	dm_free(dce110_opp->regamma.coordinates_x);
+	dm_free(dce110_opp->regamma.rgb_regamma);
+	dm_free(dce110_opp->regamma.rgb_resulted);
+	dm_free(dce110_opp->regamma.rgb_oem);
+	dm_free(dce110_opp->regamma.rgb_user);
+	dm_free(dce110_opp);
+
+	*opp = NULL;
+}
+
+struct clock_source *dce100_clock_source_create(
+	struct dc_context *ctx,
+	struct dc_bios *bios,
+	enum clock_source_id id,
+	const struct dce110_clk_src_regs *regs,
+	bool dp_clk_src)
+{
+	struct dce110_clk_src *clk_src =
+		dm_alloc(sizeof(struct dce110_clk_src));
+
+	if (!clk_src)
+		return NULL;
+
+	if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+			regs, &cs_shift, &cs_mask)) {
+		clk_src->base.dp_clk_src = dp_clk_src;
+		return &clk_src->base;
+	}
+
+	BREAK_TO_DEBUGGER();
+	dm_free(clk_src);
+	return NULL;
+}
+
+void dce100_clock_source_destroy(struct clock_source **clk_src)
+{
+	dm_free(TO_DCE110_CLK_SRC(*clk_src));
+	*clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+	unsigned int i;
+
+	for (i = 0; i < pool->base.pipe_count; i++) {
+		if (pool->base.opps[i] != NULL)
+			dce100_opp_destroy(&pool->base.opps[i]);
+
+		if (pool->base.transforms[i] != NULL)
+			dce100_transform_destroy(&pool->base.transforms[i]);
+
+		if (pool->base.ipps[i] != NULL)
+			dce110_ipp_destroy(&pool->base.ipps[i]);
+
+		if (pool->base.mis[i] != NULL) {
+			dm_free(TO_DCE110_MEM_INPUT(pool->base.mis[i]));
+			pool->base.mis[i] = NULL;
+		}
+
+		if (pool->base.timing_generators[i] != NULL) {
+			dm_free(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+			pool->base.timing_generators[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < pool->base.stream_enc_count; i++) {
+		if (pool->base.stream_enc[i] != NULL)
+			dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+	}
+
+	for (i = 0; i < pool->base.clk_src_count; i++) {
+		if (pool->base.clock_sources[i] != NULL)
+			dce100_clock_source_destroy(&pool->base.clock_sources[i]);
+	}
+
+	if (pool->base.dp_clock_source != NULL)
+		dce100_clock_source_destroy(&pool->base.dp_clock_source);
+
+	for (i = 0; i < pool->base.audio_count; i++) {
+		if (pool->base.audios[i] != NULL)
+			dce_aud_destroy(&pool->base.audios[i]);
+	}
+
+	if (pool->base.display_clock != NULL)
+		dal_display_clock_destroy(&pool->base.display_clock);
+
+	if (pool->base.irqs != NULL)
+		dal_irq_service_destroy(&pool->base.irqs);
+}
+
+static enum dc_status validate_mapped_resource(
+		const struct core_dc *dc,
+		struct validate_context *context)
+{
+	enum dc_status status = DC_OK;
+	uint8_t i, j, k;
+
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+			struct core_link *link = stream->sink->link;
+
+			if (resource_is_stream_unchanged(dc->current_context, stream))
+				continue;
+
+			for (k = 0; k < MAX_PIPES; k++) {
+				struct pipe_ctx *pipe_ctx =
+					&context->res_ctx.pipe_ctx[k];
+
+				if (pipe_ctx->stream != stream)
+					continue;
+
+				if (!pipe_ctx->tg->funcs->validate_timing(
+						pipe_ctx->tg, &stream->public.timing))
+					return DC_FAIL_CONTROLLER_VALIDATE;
+
+				status = dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+				if (status != DC_OK)
+					return status;
+
+				if (!link->link_enc->funcs->validate_output_with_stream(
+						link->link_enc,
+						pipe_ctx))
+					return DC_FAIL_ENC_VALIDATE;
+
+				/* TODO: validate audio ASIC caps, encoder */
+				status = dc_link_validate_mode_timing(stream,
+						link,
+						&stream->public.timing);
+
+				if (status != DC_OK)
+					return status;
+
+				resource_build_info_frame(pipe_ctx);
+
+				/* do not need to validate non root pipes */
+				break;
+			}
+		}
+	}
+
+	return DC_OK;
+}
+
+enum dc_status dce100_validate_bandwidth(
+	const struct core_dc *dc,
+	struct validate_context *context)
+{
+	/* TODO: implement when needed; for now, hardcode the max value */
+	context->bw_results.dispclk_khz = 681000;
+
+	return DC_OK;
+}
+
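+/* DCE10 surface-set constraints: at most one surface per target, the
+ * surface clip rect must exactly match the stream source rect, and video
+ * (YUV) pixel formats are rejected.
+ */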
+static bool dce100_validate_surface_sets(
+		const struct dc_validation_set set[],
+		int set_count)
+{
+	int i;
+
+	for (i = 0; i < set_count; i++) {
+		if (set[i].surface_count == 0)
+			continue;
+
+		if (set[i].surface_count > 1)
+			return false;
+
+		if (set[i].surfaces[0]->clip_rect.width
+				!= set[i].target->streams[0]->src.width
+				|| set[i].surfaces[0]->clip_rect.height
+				!= set[i].target->streams[0]->src.height)
+			return false;
+		if (set[i].surfaces[0]->format
+				>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+			return false;
+	}
+
+	return true;
+}
+
+enum dc_status dce100_validate_with_context(
+		const struct core_dc *dc,
+		const struct dc_validation_set set[],
+		int set_count,
+		struct validate_context *context)
+{
+	struct dc_context *dc_ctx = dc->ctx;
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	int i;
+
+	if (!dce100_validate_surface_sets(set, set_count))
+		return DC_FAIL_SURFACE_VALIDATE;
+
+	context->res_ctx.pool = dc->res_pool;
+
+	for (i = 0; i < set_count; i++) {
+		context->targets[i] = DC_TARGET_TO_CORE(set[i].target);
+		dc_target_retain(&context->targets[i]->public);
+		context->target_count++;
+	}
+
+	result = resource_map_pool_resources(dc, context);
+
+	if (result == DC_OK)
+		result = resource_map_clock_resources(dc, context);
+
+	if (!resource_validate_attach_surfaces(
+			set, set_count, dc->current_context, context)) {
+		DC_ERROR("Failed to attach surface to target!\n");
+		return DC_FAIL_ATTACH_SURFACES;
+	}
+
+	if (result == DC_OK)
+		result = validate_mapped_resource(dc, context);
+
+	if (result == DC_OK)
+		result = resource_build_scaling_params_for_context(dc, context);
+
+	if (result == DC_OK)
+		result = dce100_validate_bandwidth(dc, context);
+
+	return result;
+}
+
+enum dc_status dce100_validate_guaranteed(
+		const struct core_dc *dc,
+		const struct dc_target *dc_target,
+		struct validate_context *context)
+{
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+
+	context->res_ctx.pool = dc->res_pool;
+
+	context->targets[0] = DC_TARGET_TO_CORE(dc_target);
+	dc_target_retain(&context->targets[0]->public);
+	context->target_count++;
+
+	result = resource_map_pool_resources(dc, context);
+
+	if (result == DC_OK)
+		result = resource_map_clock_resources(dc, context);
+
+	if (result == DC_OK)
+		result = validate_mapped_resource(dc, context);
+
+	if (result == DC_OK) {
+		validate_guaranteed_copy_target(
+				context, dc->public.caps.max_targets);
+		result = resource_build_scaling_params_for_context(dc, context);
+	}
+
+	if (result == DC_OK)
+		result = dce100_validate_bandwidth(dc, context);
+
+	return result;
+}
+
+static void dce100_destroy_resource_pool(struct resource_pool **pool)
+{
+	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+	destruct(dce110_pool);
+	dm_free(dce110_pool);
+	*pool = NULL;
+}
+
+static const struct resource_funcs dce100_res_pool_funcs = {
+	.destroy = dce100_destroy_resource_pool,
+	.link_enc_create = dce100_link_encoder_create,
+	.validate_with_context = dce100_validate_with_context,
+	.validate_guaranteed = dce100_validate_guaranteed,
+	.validate_bandwidth = dce100_validate_bandwidth
+};
+
+static bool construct(
+	uint8_t num_virtual_links,
+	struct core_dc *dc,
+	struct dce110_resource_pool *pool)
+{
+	unsigned int i;
+	struct dc_context *ctx = dc->ctx;
+	struct firmware_info info;
+	struct dc_bios *bp;
+	struct dm_pp_static_clock_info static_clk_info = {0};
+
+	ctx->dc_bios->regs = &bios_regs;
+
+	pool->base.res_cap = &res_cap;
+	pool->base.funcs = &dce100_res_pool_funcs;
+	pool->base.underlay_pipe_index = -1;
+
+	bp = ctx->dc_bios;
+
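+	/* Prefer an external DP reference clock when the BIOS reports one,
+	 * leaving all three PLLs free for non-DP use; otherwise PLL0 is
+	 * reserved as the DP clock source and only two PLLs remain.
+	 */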
+	if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+		info.external_clock_source_frequency_for_dp != 0) {
+		pool->base.dp_clock_source =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+		pool->base.clock_sources[0] =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+		pool->base.clock_sources[1] =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+		pool->base.clock_sources[2] =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+		pool->base.clk_src_count = 3;
+
+	} else {
+		pool->base.dp_clock_source =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+		pool->base.clock_sources[0] =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+		pool->base.clock_sources[1] =
+				dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+		pool->base.clk_src_count = 2;
+	}
+
+	if (pool->base.dp_clock_source == NULL) {
+		dm_error("DC: failed to create dp clock source!\n");
+		BREAK_TO_DEBUGGER();
+		goto res_create_fail;
+	}
+
+	for (i = 0; i < pool->base.clk_src_count; i++) {
+		if (pool->base.clock_sources[i] == NULL) {
+			dm_error("DC: failed to create clock sources!\n");
+			BREAK_TO_DEBUGGER();
+			goto res_create_fail;
+		}
+	}
+
+	pool->base.display_clock = dal_display_clock_dce110_create(ctx);
+	if (pool->base.display_clock == NULL) {
+		dm_error("DC: failed to create display clock!\n");
+		BREAK_TO_DEBUGGER();
+		goto res_create_fail;
+	}
+
+	/* get static clock information for PPLIB or firmware, save
+	 * max_clock_state
+	 */
+	if (dm_pp_get_static_clocks(ctx, &static_clk_info)) {
+		enum clocks_state max_clocks_state =
+			dce110_resource_convert_clock_state_pp_to_dc(
+					static_clk_info.max_clocks_state);
+
+		dal_display_clock_store_max_clocks_state(
+				pool->base.display_clock, max_clocks_state);
+	}
+	{
+		struct irq_service_init_data init_data;
+		init_data.ctx = dc->ctx;
+		pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+		if (!pool->base.irqs)
+			goto res_create_fail;
+	}
+
+	/*************************************************
+	 *  Resource + asic cap hardcoding               *
+	 *************************************************/
+	pool->base.pipe_count = res_cap.num_timing_generator;
+	dc->public.caps.max_downscale_ratio = 200;
+	dc->public.caps.i2c_speed_in_khz = 40;
+
+	for (i = 0; i < pool->base.pipe_count; i++) {
+		pool->base.timing_generators[i] =
+			dce100_timing_generator_create(
+				ctx,
+				i,
+				&dce100_tg_offsets[i]);
+		if (pool->base.timing_generators[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error("DC: failed to create tg!\n");
+			goto res_create_fail;
+		}
+
+		pool->base.mis[i] = dce100_mem_input_create(ctx, i,
+				&dce100_mi_reg_offsets[i]);
+		if (pool->base.mis[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create memory input!\n");
+			goto res_create_fail;
+		}
+
+		pool->base.ipps[i] = dce100_ipp_create(ctx, i,
+				&dce100_ipp_reg_offsets[i]);
+		if (pool->base.ipps[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create input pixel processor!\n");
+			goto res_create_fail;
+		}
+
+		pool->base.transforms[i] = dce100_transform_create(ctx, i);
+		if (pool->base.transforms[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create transform!\n");
+			goto res_create_fail;
+		}
+
+		pool->base.opps[i] = dce100_opp_create(ctx, i, &dce100_opp_reg_offsets[i]);
+		if (pool->base.opps[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create output pixel processor!\n");
+			goto res_create_fail;
+		}
+	}
+
+	if (!resource_construct(num_virtual_links, dc, &pool->base,
+			&res_create_funcs))
+		goto res_create_fail;
+
+	/* Create hardware sequencer */
+	if (!dce100_hw_sequencer_construct(dc))
+		goto res_create_fail;
+
+	return true;
+
+res_create_fail:
+	destruct(pool);
+
+	return false;
+}
+
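+/* Entry point for DCE 10.x.  The core resource code is expected to select
+ * the pool constructor by ASIC family, roughly (a sketch; the real dispatch
+ * lives elsewhere in this patch):
+ *
+ *   switch (dce_version) {
+ *   case DCE_VERSION_10_0:
+ *       pool = dce100_create_resource_pool(num_virtual_links, dc);
+ *       break;
+ *   default:
+ *       break;
+ *   }
+ */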
+struct resource_pool *dce100_create_resource_pool(
+	uint8_t num_virtual_links,
+	struct core_dc *dc)
+{
+	struct dce110_resource_pool *pool =
+		dm_alloc(sizeof(struct dce110_resource_pool));
+
+	if (!pool)
+		return NULL;
+
+	if (construct(num_virtual_links, dc, pool))
+		return &pool->base;
+
+	BREAK_TO_DEBUGGER();
+	return NULL;
+}
+

+ 19 - 0
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h

@@ -0,0 +1,19 @@
+/*
+ * dce100_resource.h
+ *
+ *  Created on: 2016-01-20
+ *      Author: qyang
+ */
+
+#ifndef DCE100_RESOURCE_H_
+#define DCE100_RESOURCE_H_
+
+struct core_dc;
+struct resource_pool;
+struct dc_validation_set;
+
+struct resource_pool *dce100_create_resource_pool(
+	uint8_t num_virtual_links,
+	struct core_dc *dc);
+
+#endif /* DCE100_RESOURCE_H_ */

Too many files changed in this diff, so some files are not shown.