Browse Source

Merge tag 'drm-for-v4.15-amd-dc' of git://people.freedesktop.org/~airlied/linux

Pull amdgpu DC display code for Vega from Dave Airlie:
 "This is the pull request for the AMD DC (display code) layer which is
  a requirement to program the display engines on the new Vega and Raven
  based GPUs. It also contains support for all amdgpu supported GPUs
  (CIK, VI, Polaris), which has to be enabled. It is also a kms atomic
  modesetting compatible driver (unlike the current in-tree display
  code).

  I've kept it separate from drm-next because it may have some things
  that cause you to reject it.

  Background story:

  AMD have an internal team creating a shared OS codebase for display at
  hw bring up time using information from their hardware teams. This
  process doesn't lead to the most Linux friendly/looking code but we
  have worked together on cleaning a lot of it up and dealing with
  sparse/smatch/checkpatch, and having their team internally adhere to
  Linux coding standards.

  This tree is a complete history rebased since they started opening it,
  we decided not to squash it down as the history may have some value.
  Some of the commits therefore might not reach kernel standards, and we
  are steadily training people in AMD to better write commit msgs.

  There is a major bunch of generated bandwidth calculation and
  verification code that comes from their hardware team. On Vega and
  before this is float calculations, on Raven (DCN10) this is double
  based. They do the required things to do FP in the kernel, and I could
  understand this might raise some issues. Rewriting the bandwidth would
  be a major undertaking in reverification, it's non-trivial to work out
  if a display can handle the complete set of mode information thrown at
  it.

  Future story:

  There is a TODO list with this, and it addresses most of the remaining
  things that would be nice to refine/remove. The DCN10 code is still
  under development internally and they push out a lot of patches quite
  regularly and are supporting this code base with their display team. I
  think we've reached the point where keeping it out of tree is going to
  motivate distributions to start carrying the code, so I'd prefer we
  get it in tree. I think this code is slightly better than STAGING
  quality but not massively so, I'd really like to see that float/double
  magic gone and fixed point used, but AMD don't seem to think the
  accuracy and revalidation of the code is worth the effort"

* tag 'drm-for-v4.15-amd-dc' of git://people.freedesktop.org/~airlied/linux: (1110 commits)
  drm/amd/display: fix MST link training fail division by 0
  drm/amd/display: Fix formatting for null pointer dereference fix
  drm/amd/display: Remove dangling planes on dc commit state
  drm/amd/display: add flip_immediate to commit update for stream
  drm/amd/display: Miss register MST encoder cbs
  drm/amd/display: Fix warnings on S3 resume
  drm/amd/display: use num_timing_generator instead of pipe_count
  drm/amd/display: use configurable FBC option in dm
  drm/amd/display: fix AZ clock not enabled before program AZ endpoint
  amdgpu/dm: Don't use DRM_ERROR in amdgpu_dm_atomic_check
  amd/display: Fix potential null dereference in dce_calcs.c
  amdgpu/dm: Remove unused forward declaration
  drm/amdgpu: Remove unused dc_stream from amdgpu_crtc
  amdgpu/dc: Fix double unlock in amdgpu_dm_commit_planes
  amdgpu/dc: Fix missing null checks in amdgpu_dm.c
  amdgpu/dc: Fix potential null dereferences in amdgpu_dm.c
  amdgpu/dc: fix more indentation warnings
  amdgpu/dc: handle allocation failures in dc_commit_planes_to_stream.
  amdgpu/dc: fix indentation warning from smatch.
  amdgpu/dc: fix non-ansi function decls.
  ...
Linus Torvalds 7 years ago
parent
commit
f6705bf959
100 changed files with 46388 additions and 75 deletions
  1. 10 0
      Documentation/gpu/todo.rst
  2. 1 0
      drivers/gpu/drm/amd/amdgpu/Kconfig
  3. 16 1
      drivers/gpu/drm/amd/amdgpu/Makefile
  4. 16 0
      drivers/gpu/drm/amd/amdgpu/amdgpu.h
  5. 34 25
      drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
  6. 89 16
      drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
  7. 2 2
      drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
  8. 33 0
      drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
  9. 1 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
  10. 17 9
      drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
  11. 2 6
      drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
  12. 19 10
      drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
  13. 1 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
  14. 94 3
      drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
  15. 1 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
  16. 17 0
      drivers/gpu/drm/amd/amdgpu/cik.c
  17. 12 0
      drivers/gpu/drm/amd/amdgpu/soc15.c
  18. 21 0
      drivers/gpu/drm/amd/amdgpu/vi.c
  19. 45 0
      drivers/gpu/drm/amd/display/Kconfig
  20. 22 0
      drivers/gpu/drm/amd/display/Makefile
  21. 107 0
      drivers/gpu/drm/amd/display/TODO
  22. 17 0
      drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
  23. 4925 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
  24. 259 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
  25. 498 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
  26. 755 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
  27. 102 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
  28. 446 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
  29. 35 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
  30. 379 0
      drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
  31. 33 0
      drivers/gpu/drm/amd/display/dc/Makefile
  32. 11 0
      drivers/gpu/drm/amd/display/dc/basics/Makefile
  33. 104 0
      drivers/gpu/drm/amd/display/dc/basics/conversion.c
  34. 46 0
      drivers/gpu/drm/amd/display/dc/basics/conversion.h
  35. 567 0
      drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
  36. 161 0
      drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
  37. 75 0
      drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
  38. 102 0
      drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
  39. 397 0
      drivers/gpu/drm/amd/display/dc/basics/logger.c
  40. 30 0
      drivers/gpu/drm/amd/display/dc/basics/logger.h
  41. 307 0
      drivers/gpu/drm/amd/display/dc/basics/vector.c
  42. 27 0
      drivers/gpu/drm/amd/display/dc/bios/Makefile
  43. 3871 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
  44. 33 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
  45. 1934 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
  46. 33 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
  47. 288 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
  48. 33 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
  49. 82 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
  50. 40 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
  51. 56 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
  52. 72 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
  53. 74 0
      drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
  54. 2424 0
      drivers/gpu/drm/amd/display/dc/bios/command_table.c
  55. 102 0
      drivers/gpu/drm/amd/display/dc/bios/command_table.h
  56. 812 0
      drivers/gpu/drm/amd/display/dc/bios/command_table2.c
  57. 105 0
      drivers/gpu/drm/amd/display/dc/bios/command_table2.h
  58. 290 0
      drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
  59. 90 0
      drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
  60. 265 0
      drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
  61. 82 0
      drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
  62. 364 0
      drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
  63. 34 0
      drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
  64. 418 0
      drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
  65. 34 0
      drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
  66. 418 0
      drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
  67. 34 0
      drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
  68. 354 0
      drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
  69. 33 0
      drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
  70. 18 0
      drivers/gpu/drm/amd/display/dc/calcs/Makefile
  71. 191 0
      drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
  72. 197 0
      drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
  73. 3257 0
      drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
  74. 1899 0
      drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
  75. 37 0
      drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
  76. 120 0
      drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
  77. 40 0
      drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
  78. 1626 0
      drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
  79. 1677 0
      drivers/gpu/drm/amd/display/dc/core/dc.c
  80. 359 0
      drivers/gpu/drm/amd/display/dc/core/dc_debug.c
  81. 101 0
      drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
  82. 2367 0
      drivers/gpu/drm/amd/display/dc/core/dc_link.c
  83. 775 0
      drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
  84. 2587 0
      drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
  85. 331 0
      drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
  86. 2795 0
      drivers/gpu/drm/amd/display/dc/core/dc_resource.c
  87. 104 0
      drivers/gpu/drm/amd/display/dc/core/dc_sink.c
  88. 398 0
      drivers/gpu/drm/amd/display/dc/core/dc_stream.c
  89. 193 0
      drivers/gpu/drm/amd/display/dc/core/dc_surface.c
  90. 1103 0
      drivers/gpu/drm/amd/display/dc/dc.h
  91. 218 0
      drivers/gpu/drm/amd/display/dc/dc_bios_types.h
  92. 115 0
      drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
  93. 467 0
      drivers/gpu/drm/amd/display/dc/dc_dp_types.h
  94. 171 0
      drivers/gpu/drm/amd/display/dc/dc_helper.c
  95. 706 0
      drivers/gpu/drm/amd/display/dc/dc_hw_types.h
  96. 652 0
      drivers/gpu/drm/amd/display/dc/dc_types.h
  97. 15 0
      drivers/gpu/drm/amd/display/dc/dce/Makefile
  98. 485 0
      drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
  99. 228 0
      drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
  100. 945 0
      drivers/gpu/drm/amd/display/dc/dce/dce_audio.c

+ 10 - 0
Documentation/gpu/todo.rst

@@ -409,5 +409,15 @@ those drivers as simple as possible, so lots of room for refactoring:
 
 Contact: Noralf Trønnes, Daniel Vetter
 
+AMD DC Display Driver
+---------------------
+
+AMD DC is the display driver for AMD devices starting with Vega. There has been
+a bunch of progress cleaning it up but there's still plenty of work to be done.
+
+See drivers/gpu/drm/amd/display/TODO for tasks.
+
+Contact: Harry Wentland, Alex Deucher
+
 Outside DRM
 ===========

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/Kconfig

@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
 	  pages. Uses more memory for housekeeping, enable only for debugging.
 
 source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"

+ 16 - 1
drivers/gpu/drm/amd/amdgpu/Makefile

@@ -4,13 +4,19 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
 
 ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
 	-I$(FULL_AMD_PATH)/include \
 	-I$(FULL_AMD_PATH)/amdgpu \
 	-I$(FULL_AMD_PATH)/scheduler \
 	-I$(FULL_AMD_PATH)/powerplay/inc \
-	-I$(FULL_AMD_PATH)/acp/include
+	-I$(FULL_AMD_PATH)/acp/include \
+	-I$(FULL_AMD_DISPLAY_PATH) \
+	-I$(FULL_AMD_DISPLAY_PATH)/include \
+	-I$(FULL_AMD_DISPLAY_PATH)/dc \
+	-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
 
 amdgpu-y := amdgpu_drv.o
 
@@ -133,4 +139,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
 
 amdgpu-y += $(AMD_POWERPLAY_FILES)
 
+ifneq ($(CONFIG_DRM_AMD_DC),)
+
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
+
+endif
+
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o

+ 16 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -66,6 +66,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_dm.h"
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
@@ -101,6 +102,8 @@ extern int amdgpu_vm_fragment_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
+extern int amdgpu_dc;
+extern int amdgpu_dc_log;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_no_evict;
@@ -1535,6 +1538,7 @@ struct amdgpu_device {
 	/* display */
 	bool				enable_virtual_display;
 	struct amdgpu_mode_info		mode_info;
+	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
 	struct amdgpu_irq_src		pageflip_irq;
@@ -1590,6 +1594,9 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;
 
+	/* display related functionality */
+	struct amdgpu_display_manager dm;
+
 	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
 	int				num_ip_blocks;
 	struct mutex	mn_lock;
@@ -1653,6 +1660,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
 /*
  * Registers read & write functions.
  */
@@ -1911,5 +1921,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 			   uint64_t addr, struct amdgpu_bo **bo,
 			   struct amdgpu_bo_va_mapping **mapping);
 
+#if defined(CONFIG_DRM_AMD_DC)
+int amdgpu_dm_display_resume(struct amdgpu_device *adev );
+#else
+static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+#endif
+
 #include "amdgpu_object.h"
 #endif

+ 34 - 25
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -911,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
 					  struct cgs_display_info *info)
 {
 	CGS_FUNC_ADEV;
-	struct amdgpu_crtc *amdgpu_crtc;
-	struct drm_device *ddev = adev->ddev;
-	struct drm_crtc *crtc;
-	uint32_t line_time_us, vblank_lines;
 	struct cgs_mode_info *mode_info;
 
 	if (info == NULL)
@@ -928,30 +924,43 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
 		mode_info->ref_clock = adev->clock.spll.reference_freq;
 	}
 
-	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-		list_for_each_entry(crtc,
-				&ddev->mode_config.crtc_list, head) {
-			amdgpu_crtc = to_amdgpu_crtc(crtc);
-			if (crtc->enabled) {
-				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
-				info->display_count++;
-			}
-			if (mode_info != NULL &&
-				crtc->enabled && amdgpu_crtc->enabled &&
-				amdgpu_crtc->hw_mode.clock) {
-				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-							amdgpu_crtc->hw_mode.clock;
-				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
-							amdgpu_crtc->hw_mode.crtc_vdisplay +
-							(amdgpu_crtc->v_border * 2);
-				mode_info->vblank_time_us = vblank_lines * line_time_us;
-				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-				mode_info->ref_clock = adev->clock.spll.reference_freq;
-				mode_info = NULL;
+	if (!amdgpu_device_has_dc_support(adev)) {
+		struct amdgpu_crtc *amdgpu_crtc;
+		struct drm_device *ddev = adev->ddev;
+		struct drm_crtc *crtc;
+		uint32_t line_time_us, vblank_lines;
+
+		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+			list_for_each_entry(crtc,
+					&ddev->mode_config.crtc_list, head) {
+				amdgpu_crtc = to_amdgpu_crtc(crtc);
+				if (crtc->enabled) {
+					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+					info->display_count++;
+				}
+				if (mode_info != NULL &&
+					crtc->enabled && amdgpu_crtc->enabled &&
+					amdgpu_crtc->hw_mode.clock) {
+					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+								amdgpu_crtc->hw_mode.clock;
+					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+								amdgpu_crtc->hw_mode.crtc_vdisplay +
+								(amdgpu_crtc->v_border * 2);
+					mode_info->vblank_time_us = vblank_lines * line_time_us;
+					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+					mode_info->ref_clock = adev->clock.spll.reference_freq;
+					mode_info = NULL;
+				}
 			}
 		}
+	} else {
+		info->display_count = adev->pm.pm_display_cfg.num_display;
+		if (mode_info != NULL) {
+			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
+			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
+			mode_info->ref_clock = adev->clock.spll.reference_freq;
+		}
 	}
-
 	return 0;
 }
 

+ 89 - 16
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -31,6 +31,7 @@
 #include <linux/debugfs.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
@@ -2046,6 +2047,52 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 	}
 }
 
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+{
+	switch (asic_type) {
+#if defined(CONFIG_DRM_AMD_DC)
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS12:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+		return amdgpu_dc != 0;
+#endif
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		return amdgpu_dc > 0;
+	case CHIP_VEGA10:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	case CHIP_RAVEN:
+#endif
+		return amdgpu_dc != 0;
+#endif
+	default:
+		return false;
+	}
+}
+
+/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		return false;
+
+	return amdgpu_device_asic_has_dc_support(adev->asic_type);
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -2100,7 +2147,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
-
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -2242,7 +2288,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			goto failed;
 		}
 		/* init i2c buses */
-		amdgpu_atombios_i2c_init(adev);
+		if (!amdgpu_device_has_dc_support(adev))
+			amdgpu_atombios_i2c_init(adev);
 	}
 
 	/* Fence driver */
@@ -2378,7 +2425,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	adev->accel_working = false;
 	cancel_delayed_work_sync(&adev->late_init_work);
 	/* free i2c buses */
-	amdgpu_i2c_fini(adev);
+	if (!amdgpu_device_has_dc_support(adev))
+		amdgpu_i2c_fini(adev);
 	amdgpu_atombios_fini(adev);
 	kfree(adev->bios);
 	adev->bios = NULL;
@@ -2429,12 +2477,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	drm_kms_helper_poll_disable(dev);
 
-	/* turn off display hw */
-	drm_modeset_lock_all(dev);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	if (!amdgpu_device_has_dc_support(adev)) {
+		/* turn off display hw */
+		drm_modeset_lock_all(dev);
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		}
+		drm_modeset_unlock_all(dev);
 	}
-	drm_modeset_unlock_all(dev);
 
 	amdgpu_amdkfd_suspend(adev);
 
@@ -2577,13 +2627,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 
 	/* blat the mode back in */
 	if (fbcon) {
-		drm_helper_resume_force_mode(dev);
-		/* turn on display hw */
-		drm_modeset_lock_all(dev);
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		if (!amdgpu_device_has_dc_support(adev)) {
+			/* pre DCE11 */
+			drm_helper_resume_force_mode(dev);
+
+			/* turn on display hw */
+			drm_modeset_lock_all(dev);
+			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+			}
+			drm_modeset_unlock_all(dev);
+		} else {
+			/*
+			 * There is no equivalent atomic helper to turn on
+			 * display, so we defined our own function for this,
+			 * once suspend resume is supported by the atomic
+			 * framework this will be reworked
+			 */
+			amdgpu_dm_display_resume(adev);
 		}
-		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
@@ -2600,7 +2662,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth++;
 #endif
-	drm_helper_hpd_irq_event(dev);
+	if (!amdgpu_device_has_dc_support(adev))
+		drm_helper_hpd_irq_event(dev);
+	else
+		drm_kms_helper_hotplug_event(dev);
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth--;
 #endif
@@ -2900,6 +2965,7 @@ give_up_reset:
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
+	struct drm_atomic_state *state = NULL;
 	int i, r;
 	int resched;
 	bool need_full_reset, vram_lost = false;
@@ -2913,6 +2979,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	/* store modesetting */
+	if (amdgpu_device_has_dc_support(adev))
+		state = drm_atomic_helper_suspend(adev->ddev);
 
 	/* block scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3029,7 +3098,11 @@ out:
 		}
 	}
 
-	drm_helper_resume_force_mode(adev->ddev);
+	if (amdgpu_device_has_dc_support(adev)) {
+		r = drm_atomic_helper_resume(adev->ddev, state);
+		amdgpu_dm_display_resume(adev);
+	} else
+		drm_helper_resume_force_mode(adev->ddev);
 
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
 	if (r) {

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -518,7 +518,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
 	return 0;
 }
 
-static struct drm_framebuffer *
+struct drm_framebuffer *
 amdgpu_user_framebuffer_create(struct drm_device *dev,
 			       struct drm_file *file_priv,
 			       const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -556,7 +556,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
 	return &amdgpu_fb->base;
 }
 
-static void amdgpu_output_poll_changed(struct drm_device *dev)
+void amdgpu_output_poll_changed(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	amdgpu_fb_output_poll_changed(adev);

+ 33 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DISPLAY_H__
+#define __AMDGPU_DISPLAY_H__
+
+struct drm_framebuffer *
+amdgpu_user_framebuffer_create(struct drm_device *dev,
+						       struct drm_file *file_priv,
+							   const struct drm_mode_fb_cmd2 *mode_cmd);
+
+void amdgpu_output_poll_changed(struct drm_device *dev);
+
+#endif

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h

@@ -433,7 +433,7 @@ struct amdgpu_pm {
 	uint32_t                fw_version;
 	uint32_t                pcie_gen_mask;
 	uint32_t                pcie_mlw_mask;
-	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+	struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
 };
 
 #define R600_SSTU_DFLT                               0

+ 17 - 9
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -106,6 +106,8 @@ int amdgpu_vm_debug = 0;
 int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
+int amdgpu_dc = -1;
+int amdgpu_dc_log = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_no_evict = 0;
@@ -211,6 +213,12 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
+MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+module_param_named(dc_log, amdgpu_dc_log, int, 0444);
+
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 
@@ -518,15 +526,15 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	/* Vega 10 */
-	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	/* Raven */
 	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
 

+ 2 - 6
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -42,11 +42,6 @@
    this contains a helper + a amdgpu fb
    the helper contains a pointer to amdgpu framebuffer baseclass.
 */
-struct amdgpu_fbdev {
-	struct drm_fb_helper helper;
-	struct amdgpu_framebuffer rfb;
-	struct amdgpu_device *adev;
-};
 
 static int
 amdgpufb_open(struct fb_info *info, int user)
@@ -353,7 +348,8 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
 
 	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(adev->ddev);
+	if (!amdgpu_device_has_dc_support(adev))
+		drm_helper_disable_unused_functions(adev->ddev);
 
 	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
 	return 0;

+ 19 - 10
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c

@@ -37,6 +37,10 @@
 
 #include <linux/pm_runtime.h>
 
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
 
 /*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 
 	spin_lock_init(&adev->irq.lock);
 
-	if (!adev->enable_virtual_display)
-		/* Disable vblank irqs aggressively for power-saving */
-		adev->ddev->vblank_disable_immediate = true;
-
-	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
-	if (r) {
-		return r;
-	}
-
 	/* enable msi */
 	adev->irq.msi_enabled = false;
 
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 		}
 	}
 
-	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+	if (!amdgpu_device_has_dc_support(adev)) {
+		if (!adev->enable_virtual_display)
+			/* Disable vblank irqs aggressively for power-saving */
+			/* XXX: can this be enabled for DC? */
+			adev->ddev->vblank_disable_immediate = true;
+
+		r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+		if (r)
+			return r;
+
+		/* pre DCE11 */
+		INIT_WORK(&adev->hotplug_work,
+				amdgpu_hotplug_work_func);
+	}
+
 	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
 
 	adev->irq.installed = true;

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -1030,7 +1030,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
 };
 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
 

+ 94 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

@@ -38,11 +38,15 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/hrtimer.h>
 #include "amdgpu_irq.h"
 
+#include <drm/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+
 struct amdgpu_bo;
 struct amdgpu_device;
 struct amdgpu_encoder;
@@ -53,9 +57,13 @@ struct amdgpu_hpd;
 #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
 #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
 #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+#define to_amdgpu_plane(x)	container_of(x, struct amdgpu_plane, base)
+
+#define to_dm_plane_state(x)	container_of(x, struct dm_plane_state, base);
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_PLANES 6
 #define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
@@ -292,6 +300,30 @@ struct amdgpu_display_funcs {
 			      uint16_t connector_object_id,
 			      struct amdgpu_hpd *hpd,
 			      struct amdgpu_router *router);
+	/* it is used to enter or exit into free sync mode */
+	int (*notify_freesync)(struct drm_device *dev, void *data,
+			       struct drm_file *filp);
+	/* it is used to allow enablement of freesync mode */
+	int (*set_freesync_property)(struct drm_connector *connector,
+				     struct drm_property *property,
+				     uint64_t val);
+
+
+};
+
+struct amdgpu_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+
+	/* caching for later use */
+	uint64_t address;
+};
+
+struct amdgpu_fbdev {
+	struct drm_fb_helper helper;
+	struct amdgpu_framebuffer rfb;
+	struct list_head fbdev_list;
+	struct amdgpu_device *adev;
 };
 
 struct amdgpu_mode_info {
@@ -299,6 +331,7 @@ struct amdgpu_mode_info {
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
 	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+	struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
 	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
@@ -328,6 +361,7 @@ struct amdgpu_mode_info {
 	int			num_dig; /* number of dig blocks */
 	int			disp_priority;
 	const struct amdgpu_display_funcs *funcs;
+	const enum drm_plane_type *plane_type;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -400,6 +434,14 @@ struct amdgpu_crtc {
 	/* for virtual dce */
 	struct hrtimer vblank_timer;
 	enum amdgpu_interrupt_state vsync_timer_enabled;
+
+	int otg_inst;
+	struct drm_pending_vblank_event *event;
+};
+
+struct amdgpu_plane {
+	struct drm_plane base;
+	enum drm_plane_type plane_type;
 };
 
 struct amdgpu_encoder_atom_dig {
@@ -489,6 +531,19 @@ enum amdgpu_connector_dither {
 	AMDGPU_FMT_DITHER_ENABLE = 1,
 };
 
+struct amdgpu_dm_dp_aux {
+	struct drm_dp_aux aux;
+	struct ddc_service *ddc_service;
+};
+
+struct amdgpu_i2c_adapter {
+	struct i2c_adapter base;
+
+	struct ddc_service *ddc_service;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
 struct amdgpu_connector {
 	struct drm_connector base;
 	uint32_t connector_id;
@@ -500,6 +555,14 @@ struct amdgpu_connector {
 	/* we need to mind the EDID between detect
 	   and get modes due to analog/digital/tvencoder */
 	struct edid *edid;
+	/* number of modes generated from EDID at 'dc_sink' */
+	int num_modes;
+	/* The 'old' sink - before an HPD.
+	 * The 'current' sink is in dc_link->sink. */
+	struct dc_sink *dc_sink;
+	struct dc_link *dc_link;
+	struct dc_sink *dc_em_sink;
+	const struct dc_stream *stream;
 	void *con_priv;
 	bool dac_load_detect;
 	bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +573,39 @@ struct amdgpu_connector {
 	enum amdgpu_connector_audio audio;
 	enum amdgpu_connector_dither dither;
 	unsigned pixelclock_for_modeset;
+
+	struct drm_dp_mst_topology_mgr mst_mgr;
+	struct amdgpu_dm_dp_aux dm_dp_aux;
+	struct drm_dp_mst_port *port;
+	struct amdgpu_connector *mst_port;
+	struct amdgpu_encoder *mst_encoder;
+	struct semaphore mst_sem;
+
+	/* TODO see if we can merge with ddc_bus or make a dm_connector */
+	struct amdgpu_i2c_adapter *i2c;
+
+	/* Monitor range limits */
+	int min_vfreq ;
+	int max_vfreq ;
+	int pixel_clock_mhz;
+
+	/*freesync caps*/
+	struct mod_freesync_caps caps;
+
+	struct mutex hpd_lock;
+
 };
 
-struct amdgpu_framebuffer {
-	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
+/* TODO: start to use this struct and remove same field from base one */
+struct amdgpu_mst_connector {
+	struct amdgpu_connector base;
+
+	struct drm_dp_mst_topology_mgr mst_mgr;
+	struct amdgpu_dm_dp_aux dm_dp_aux;
+	struct drm_dp_mst_port *port;
+	struct amdgpu_connector *mst_port;
+	bool is_mst_connector;
+	struct amdgpu_encoder *mst_encoder;
 };
 
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -1466,7 +1466,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			list_for_each_entry(crtc,
 					    &ddev->mode_config.crtc_list, head) {
 				amdgpu_crtc = to_amdgpu_crtc(crtc);
-				if (crtc->enabled) {
+				if (amdgpu_crtc->enabled) {
 					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
 					adev->pm.dpm.new_active_crtc_count++;
 				}

+ 17 - 0
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -65,6 +65,7 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "amdgpu_dm.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_powerplay.h"
 #include "dce_virtual.h"
@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1919,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
@@ -1928,6 +1937,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
@@ -1943,6 +1956,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);

+ 12 - 0
drivers/gpu/drm/amd/amdgpu/soc15.c

@@ -532,6 +532,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 			amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
 		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
 		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
@@ -545,6 +551,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
 		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
 		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);

+ 21 - 0
drivers/gpu/drm/amd/amdgpu/vi.c

@@ -77,6 +77,7 @@
 #endif
 #include "dce_virtual.h"
 #include "mxgpu_vi.h"
+#include "amdgpu_dm.h"
 
 /*
  * Indirect registers accessor
@@ -1502,6 +1503,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1518,6 +1523,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1536,6 +1545,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1550,6 +1563,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1567,6 +1584,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
 		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);

+ 45 - 0
drivers/gpu/drm/amd/display/Kconfig

@@ -0,0 +1,45 @@
+menu "Display Engine Configuration"
+	depends on DRM && DRM_AMDGPU
+
+config DRM_AMD_DC
+	bool "AMD DC - Enable new display engine"
+	default y
+	help
+	  Choose this option if you want to use the new display engine
+	  support for AMDGPU. This adds required support for Vega and
+	  Raven ASICs.
+
+config DRM_AMD_DC_PRE_VEGA
+	bool "DC support for Polaris and older ASICs"
+	default n
+	help
+	  Choose this option to enable the new DC support for older asics
+	  by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+	  and Hawaii.
+
+config DRM_AMD_DC_FBC
+	bool "AMD FBC - Enable Frame Buffer Compression"
+	depends on DRM_AMD_DC
+	help
+	  Choose this option if you want to use frame buffer compression
+	  support.
+	  This is a power optimisation feature, check its availability
+	  on your hardware before enabling this option.
+
+
+config DRM_AMD_DC_DCN1_0
+	bool "DCN 1.0 Raven family"
+	depends on DRM_AMD_DC && X86
+	help
+	  Choose this option if you want to have
+	  RV family for display engine
+
+config DEBUG_KERNEL_DC
+	bool "Enable kgdb break in DC"
+	depends on DRM_AMD_DC
+	help
+	  Choose this option
+	  if you want to hit
+	  kgdb_break in assert.
+
+endmenu

+ 22 - 0
drivers/gpu/drm/amd/display/Makefile

@@ -0,0 +1,22 @@
+#
+# Makefile for the DAL (Display Abstract Layer), which is a sub-component
+# of the AMDGPU drm driver.
+# It provides the HW control for display related functionalities.
+
+AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
+
+subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+
+#TODO: remove when Timing Sync feature is complete
+subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+DAL_LIBS = amdgpu_dm dc	modules/freesync
+
+AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+include $(AMD_DAL)

+ 107 - 0
drivers/gpu/drm/amd/display/TODO

@@ -0,0 +1,107 @@
+===============================================================================
+TODOs
+===============================================================================
+
+1. Base this on drm-next - WIP
+
+
+2. Cleanup commit history
+
+
+3. WIP - Drop page flip helper and use DRM's version
+
+
+4. DONE - Flatten all DC objects
+    * dc_stream/core_stream/stream should just be dc_stream
+    * Same for other DC objects
+
+    "Is there any major reason to keep all those abstractions?
+
+    Could you collapse everything into struct dc_stream?
+
+    I haven't looked recently but I didn't get the impression there was a
+    lot of design around what was public/protected, more whatever needed
+    to be used by someone else was in public."
+    ~ Dave Airlie
+
+
+5. DONE - Rename DC objects to align more with DRM
+    * dc_surface -> dc_plane_state
+    * dc_stream -> dc_stream_state
+
+
+6. DONE - Per-plane and per-stream validation
+
+
+7. WIP - Per-plane and per-stream commit
+
+
+8. WIP - Split pipe_ctx into plane and stream resource structs
+
+
+9. Attach plane and stream resources to state object instead of validate_context
+
+
+10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
+    * Use drm_display_info instead
+    * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
+
+    "Making sure you use the sink-specific helper libraries and kernel
+    subsystems, since there's really no good reason to have 2nd
+    implementation of those in the kernel. Looks likes that's done for mst
+    and edid parsing. There's still a bit a midlayer feeling to the edid
+    parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
+    think it'd be much better if you convert that over to reading stuff
+    from drm_display_info and if needed, push stuff into the core). Also,
+    I can't come up with a good reason why DC needs all this (except to
+    reimplement half of our edid quirk table, which really isn't a good
+    idea). Might be good if you put this onto the list of things to fix
+    long-term, but imo not a blocker. Definitely make sure new stuff
+    doesn't slip in (i.e. if you start adding edid quirks to DC instead of
+    the drm core, refactoring to use the core edid stuff was pointless)."
+    ~ Daniel Vetter
+
+
+11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
+overly complicated HW programming function for sending and receiving i2c/aux
+commands. We can greatly simplify that and move it into dc/dceXYZ like other
+HW blocks.
+
+12. drm_modeset_lock in MST should no longer be needed in recent kernels
+    * Adopt appropriate locking scheme
+
+13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
+a few indirections, and consider removing entirely and using the
+drm_atomic_helper_best_encoder default behaviour.
+
+14. core/dc_debug.c, consider switching to the atomic state debug helpers and
+moving all your driver state printing into the various atomic_print_state
+callbacks. There's also plans to expose this stuff in a standard way across all
+drivers, to make debugging userspace compositors easier across different hw.
+
+15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
+dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
+
+16. Move to core SCDC helpers (I think those are new since initial DC review).
+
+17. There's still a pretty massive layer cake around dp aux and DPCD handling,
+with like 3 levels of abstraction and using your own structures instead of the
+stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
+incompatible styles, just means more reasons not to add a third (or well third
+one gets to do the cleanup refactor).
+
+18. There's a pile of sink handling code, both for DP and HDMI where I didn't
+immediately recognize the standard. I think long term it'd be best for the drm
+subsystem if we try to move as much of that into helpers/core as possible, and
+share it with drivers. But that's a very long term goal, and by far not just an
+issue with DC - other drivers, especially around DP sink handling, are equally
+guilty.
+
+19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
+stuff just isn't up to the challenges either. We need to figure out something
+that integrates better with DRM and linux debug printing, while not being
+useless with filtering output. dynamic debug printing might be an option.
+
+20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
+retimer that we need to program to pass PHY compliance. Currently that's
+bypassing the i2c device and goes directly to HW. This should be changed.

+ 17 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile

@@ -0,0 +1,17 @@
+#
+# Makefile for the 'dm' sub-component of DAL.
+# It provides the control and status of dm blocks.
+
+
+
+AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+
+ifneq ($(CONFIG_DRM_AMD_DC),)
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+
+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
+
+AMD_DISPLAY_FILES += $(AMDGPU_DM)

+ 4925 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -0,0 +1,4925 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "dc/inc/core_types.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_pm.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "dm_services_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/types.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+#include "modules/inc/mod_freesync.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "soc15_common.h"
+#endif
+
+#include "modules/inc/mod_freesync.h"
+
+#include "i2caux_interface.h"
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+				struct amdgpu_plane *aplane,
+				unsigned long possible_crtcs);
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+			       struct drm_plane *plane,
+			       uint32_t link_index);
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+				    struct amdgpu_dm_connector *amdgpu_dm_connector,
+				    uint32_t link_index,
+				    struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+				  struct amdgpu_encoder *aencoder,
+				  uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+				   struct drm_atomic_state *state,
+				   bool nonblock);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+				  struct drm_atomic_state *state);
+
+
+
+
+static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+};
+
+static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
+};
+
+static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_PRIMARY,
+	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
+
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int disp_idx - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc)
+		return 0;
+	else {
+		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+				acrtc->base.state);
+
+
+		if (acrtc_state->stream == NULL) {
+			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+				  crtc);
+			return 0;
+		}
+
+		return dc_stream_get_vblank_counter(acrtc_state->stream);
+	}
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+				  u32 *vbl, u32 *position)
+{
+	uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+		return -EINVAL;
+	else {
+		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+						acrtc->base.state);
+
+		if (acrtc_state->stream ==  NULL) {
+			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+				  crtc);
+			return 0;
+		}
+
+		/*
+		 * TODO rework base driver to use values directly.
+		 * for now parse it back into reg-format
+		 */
+		dc_stream_get_scanoutpos(acrtc_state->stream,
+					 &v_blank_start,
+					 &v_blank_end,
+					 &h_position,
+					 &v_position);
+
+		*position = v_position | (h_position << 16);
+		*vbl = v_blank_start | (v_blank_end << 16);
+	}
+
+	return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+	/* XXX todo */
+	return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+	/* XXX todo */
+	return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+	return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+	/* XXX todo */
+	return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+		     int otg_inst)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+
+	/*
+	 * The following check is inherited from both functions where this one
+	 * is now used. Why this case can happen still needs to be investigated.
+	 */
+	if (otg_inst == -1) {
+		WARN_ON(1);
+		return adev->mode_info.crtcs[0];
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+		if (amdgpu_crtc->otg_inst == otg_inst)
+			return amdgpu_crtc;
+	}
+
+	return NULL;
+}
+
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct common_irq_params *irq_params = interrupt_params;
+	struct amdgpu_device *adev = irq_params->adev;
+	unsigned long flags;
+
+	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+	/* IRQ could occur when in initial stage */
+	/*TODO work and BO cleanup */
+	if (amdgpu_crtc == NULL) {
+		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+		return;
+	}
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+						 amdgpu_crtc->pflip_status,
+						 AMDGPU_FLIP_SUBMITTED,
+						 amdgpu_crtc->crtc_id,
+						 amdgpu_crtc);
+		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		return;
+	}
+
+
+	/* wake up userspace */
+	if (amdgpu_crtc->event) {
+		/* Update to correct count/ts if racing with vblank irq */
+		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+
+		/* page flip completed. clean up */
+		amdgpu_crtc->event = NULL;
+
+	} else
+		WARN_ON(1);
+
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
+					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+	struct common_irq_params *irq_params = interrupt_params;
+	struct amdgpu_device *adev = irq_params->adev;
+	uint8_t crtc_index = 0;
+	struct amdgpu_crtc *acrtc;
+
+	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+	if (acrtc)
+		crtc_index = acrtc->crtc_id;
+
+	drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+		  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+		  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void* handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+	struct drm_device *dev = dm->ddev;
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dal_asic_id.h"
+/* Allocate memory for FBC compressed data  */
+/* TODO: Dynamic allocation */
+#define AMDGPU_FBC_SIZE    (3840 * 2160 * 4)
+
+static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
+{
+	int r;
+	struct dm_comressor_info *compressor = &adev->dm.compressor;
+
+	if (!compressor->bo_ptr) {
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
+				AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
+				&compressor->gpu_addr, &compressor->cpu_addr);
+
+		if (r)
+			DRM_ERROR("DM: Failed to initialize fbc\n");
+	}
+
+}
+#endif
+
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+	struct dc_init_data init_data;
+	adev->dm.ddev = adev->ddev;
+	adev->dm.adev = adev;
+
+	/* Zero all the fields */
+	memset(&init_data, 0, sizeof(init_data));
+
+	/* initialize DAL's lock (for SYNC context use) */
+	spin_lock_init(&adev->dm.dal_lock);
+
+	/* initialize DAL's mutex */
+	mutex_init(&adev->dm.dal_mutex);
+
+	if(amdgpu_dm_irq_init(adev)) {
+		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+		goto error;
+	}
+
+	init_data.asic_id.chip_family = adev->family;
+
+	init_data.asic_id.pci_revision_id = adev->rev_id;
+	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+	init_data.asic_id.vram_width = adev->mc.vram_width;
+	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
+	init_data.asic_id.atombios_base_address =
+		adev->mode_info.atom_context->bios;
+
+	init_data.driver = adev;
+
+	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+	if (!adev->dm.cgs_device) {
+		DRM_ERROR("amdgpu: failed to create cgs device.\n");
+		goto error;
+	}
+
+	init_data.cgs_device = adev->dm.cgs_device;
+
+	adev->dm.dal = NULL;
+
+	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+	if (amdgpu_dc_log)
+		init_data.log_mask = DC_DEFAULT_LOG_MASK;
+	else
+		init_data.log_mask = DC_MIN_LOG_MASK;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	if (adev->family == FAMILY_CZ)
+		amdgpu_dm_initialize_fbc(adev);
+	init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
+#endif
+	/* Display Core create. */
+	adev->dm.dc = dc_create(&init_data);
+
+	if (adev->dm.dc) {
+		DRM_INFO("Display Core initialized!\n");
+	} else {
+		DRM_INFO("Display Core failed to initialize!\n");
+		goto error;
+	}
+
+	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+
+	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+	if (!adev->dm.freesync_module) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize freesync_module.\n");
+	} else
+		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+				adev->dm.freesync_module);
+
+	if (amdgpu_dm_initialize_drm_device(adev)) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize sw for display support.\n");
+		goto error;
+	}
+
+	/* Update the actual used number of crtc */
+	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+	/* TODO: Add_display_info? */
+
+	/* TODO use dynamic cursor width */
+	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize sw for display support.\n");
+		goto error;
+	}
+
+	DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+	return 0;
+error:
+	amdgpu_dm_fini(adev);
+
+	return -1;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+	amdgpu_dm_destroy_drm_device(&adev->dm);
+	/*
+	 * TODO: pageflip, vlank interrupt
+	 *
+	 * amdgpu_dm_irq_fini(adev);
+	 */
+
+	if (adev->dm.cgs_device) {
+		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+		adev->dm.cgs_device = NULL;
+	}
+	if (adev->dm.freesync_module) {
+		mod_freesync_destroy(adev->dm.freesync_module);
+		adev->dm.freesync_module = NULL;
+	}
+	/* DC Destroy TODO: Replace destroy DAL */
+	if (adev->dm.dc)
+		dc_destroy(&adev->dm.dc);
+	return;
+}
+
+static int dm_sw_init(void *handle)
+{
+	return 0;
+}
+
+static int dm_sw_fini(void *handle)
+{
+	return 0;
+}
+
+/*
+ * Start MST topology management on every connector whose dc_link detected
+ * an MST branch device.
+ *
+ * Returns 0 on success or the negative error code from
+ * drm_dp_mst_topology_mgr_set_mst() for the first failing connector.
+ */
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	int ret = 0;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type == dc_connection_mst_branch) {
+			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+					aconnector, aconnector->base.base.id);
+
+			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+			if (ret < 0) {
+				DRM_ERROR("DM_MST: Failed to start MST\n");
+				/* fall back to treating the link as SST */
+				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
+				/*
+				 * Break instead of returning: the previous
+				 * early return leaked connection_mutex.
+				 */
+				break;
+			}
+		}
+	}
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	return ret;
+}
+
+/* IP late_init hook: kick off MST topology detection on all connectors. */
+static int dm_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return detect_mst_link_for_all_connectors(adev->ddev);
+}
+
+/* Suspend or resume the MST topology manager of every root MST connector. */
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+
+		/* only root connectors (no mst_port) own a topology manager */
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_port)
+			continue;
+
+		if (suspend)
+			drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+		else
+			drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+	}
+
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+/* IP hw_init hook: bring up the display manager and HPD handling. */
+static int dm_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	/* Create DAL display manager */
+	/* NOTE(review): amdgpu_dm_init()'s return value is ignored here, so a
+	 * failed init is only visible through its own error messages. */
+	amdgpu_dm_init(adev);
+	amdgpu_dm_hpd_init(adev);
+
+	return 0;
+}
+
+/* IP hw_fini hook: tear down HPD, DM interrupts and the display manager. */
+static int dm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_dm_hpd_fini(adev);
+
+	amdgpu_dm_irq_fini(adev);
+	amdgpu_dm_fini(adev);
+	return 0;
+}
+
+/*
+ * IP suspend hook: quiesce MST and DM IRQs, cache the current atomic state
+ * (consumed later by amdgpu_dm_display_resume()), then put DC into D3.
+ */
+static int dm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = handle;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	int ret = 0;
+
+	s3_handle_mst(adev->ddev, true);
+
+	amdgpu_dm_irq_suspend(adev);
+
+	/* the previous resume must have consumed and cleared cached_state */
+	WARN_ON(adev->dm.cached_state);
+	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
+	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+	return ret;
+}
+
+/*
+ * Return the first connector in @state whose new connector state is bound
+ * to @crtc, or NULL if no such connector exists.
+ */
+static struct amdgpu_dm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+					     struct drm_crtc *crtc)
+{
+	struct drm_connector_state *new_con_state;
+	struct drm_connector *connector;
+	uint32_t i;
+
+	for_each_new_connector_in_state(state, connector, new_con_state, i) {
+		if (new_con_state->crtc == crtc)
+			return to_amdgpu_dm_connector(connector);
+	}
+
+	return NULL;
+}
+
+/*
+ * IP resume hook: only restores DC power to D0 here; the display state
+ * itself is rebuilt later by amdgpu_dm_display_resume().
+ */
+static int dm_resume(void *handle)
+{
+	struct amdgpu_device *adev = handle;
+	struct amdgpu_display_manager *dm = &adev->dm;
+
+	/* power on hardware */
+	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+	return 0;
+}
+
+/*
+ * Restore the display state after S3: re-enable MST, redetect all links,
+ * release the dc states that were duplicated at suspend time, and replay
+ * the atomic state cached by dm_suspend().
+ *
+ * Returns the result of drm_atomic_helper_resume().
+ */
+int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_crtc_state;
+	struct dm_crtc_state *dm_new_crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *new_plane_state;
+	struct dm_plane_state *dm_new_plane_state;
+
+	int ret = 0;
+	int i;
+
+	/* program HPD filter */
+	dc_resume(dm->dc);
+
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+	s3_handle_mst(ddev, false);
+
+	/*
+	 * early enable HPD Rx IRQ, should be done before set mode as short
+	 * pulse interrupts are used for MST
+	 */
+	amdgpu_dm_irq_resume_early(adev);
+
+	/* Do detection */
+	list_for_each_entry(connector,
+			&ddev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+
+		/*
+		 * this is the case when traversing through already created
+		 * MST connectors, should be skipped
+		 */
+		if (aconnector->mst_port)
+			continue;
+
+		mutex_lock(&aconnector->hpd_lock);
+		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+		/* clear the stale sink so the update below takes the
+		 * "sink changed" path */
+		aconnector->dc_sink = NULL;
+		amdgpu_dm_update_connector_after_detect(aconnector);
+		mutex_unlock(&aconnector->hpd_lock);
+	}
+
+	/* Force mode set in atomic commit */
+	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+		new_crtc_state->active_changed = true;
+
+	/*
+	 * atomic_check is expected to create the dc states. We need to release
+	 * them here, since they were duplicated as part of the suspend
+	 * procedure.
+	 */
+	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		if (dm_new_crtc_state->stream) {
+			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+			dc_stream_release(dm_new_crtc_state->stream);
+			dm_new_crtc_state->stream = NULL;
+		}
+	}
+
+	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+		dm_new_plane_state = to_dm_plane_state(new_plane_state);
+		if (dm_new_plane_state->dc_state) {
+			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+			dc_plane_state_release(dm_new_plane_state->dc_state);
+			dm_new_plane_state->dc_state = NULL;
+		}
+	}
+
+	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+
+	drm_atomic_state_put(adev->dm.cached_state);
+	adev->dm.cached_state = NULL;
+
+	amdgpu_dm_irq_resume_late(adev);
+
+	return ret;
+}
+
+/* IP-block callbacks wiring the DM into the amdgpu IP lifecycle. */
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+	.name = "dm",
+	.early_init = dm_early_init,
+	.late_init = dm_late_init,
+	.sw_init = dm_sw_init,
+	.sw_fini = dm_sw_fini,
+	.hw_init = dm_hw_init,
+	.hw_fini = dm_hw_fini,
+	.suspend = dm_suspend,
+	.resume = dm_resume,
+	.is_idle = dm_is_idle,
+	.wait_for_idle = dm_wait_for_idle,
+	.check_soft_reset = dm_check_soft_reset,
+	.soft_reset = dm_soft_reset,
+	.set_clockgating_state = dm_set_clockgating_state,
+	.set_powergating_state = dm_set_powergating_state,
+};
+
+/* IP block descriptor registered with the amdgpu core for the DCE/DM IP. */
+const struct amdgpu_ip_block_version dm_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &amdgpu_dm_funcs,
+};
+
+
+/* Allocate the DM-private wrapper around drm_atomic_state. */
+static struct drm_atomic_state *
+dm_atomic_state_alloc(struct drm_device *dev)
+{
+	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (state && drm_atomic_state_init(dev, &state->base) == 0)
+		return &state->base;
+
+	/* kfree(NULL) is a no-op, so this covers both failure cases */
+	kfree(state);
+	return NULL;
+}
+
+/* Drop the dc validation context before clearing the base atomic state. */
+static void
+dm_atomic_state_clear(struct drm_atomic_state *state)
+{
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+	if (dm_state->context) {
+		dc_release_state(dm_state->context);
+		dm_state->context = NULL;
+	}
+
+	drm_atomic_state_default_clear(state);
+}
+
+/* Release and free a DM-private atomic state. */
+static void
+dm_atomic_state_alloc_free(struct drm_atomic_state *state)
+{
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+	drm_atomic_state_default_release(state);
+	kfree(dm_state);
+}
+
+/* mode_config callbacks: atomic check/commit plus DM-private state hooks. */
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+	.fb_create = amdgpu_user_framebuffer_create,
+	.output_poll_changed = amdgpu_output_poll_changed,
+	.atomic_check = amdgpu_dm_atomic_check,
+	.atomic_commit = amdgpu_dm_atomic_commit,
+	.atomic_state_alloc = dm_atomic_state_alloc,
+	.atomic_state_clear = dm_atomic_state_clear,
+	.atomic_state_free = dm_atomic_state_alloc_free
+};
+
+/* Helper vtable: route commit_tail through the DM implementation. */
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+};
+
+/*
+ * Synchronize the drm connector (sink pointer, EDID property, freesync
+ * registration) with the outcome of the latest dc_link_detect() on its link.
+ * Takes dev->mode_config.mutex around the connector/property updates.
+ */
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	struct dc_sink *sink;
+
+	/* MST handled by drm_mst framework */
+	if (aconnector->mst_mgr.mst_state == true)
+		return;
+
+
+	sink = aconnector->dc_link->local_sink;
+
+	/* Edid mgmt connector gets first update only in mode_valid hook and then
+	 * the connector sink is set to either fake or physical sink depends on link status.
+	 * Don't do it here if you are during boot.
+	 */
+	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+			&& aconnector->dc_em_sink) {
+
+		/* For S3 resume with headless use eml_sink to fake stream
+		 * because on resume connector->sink is set to NULL
+		 */
+		mutex_lock(&dev->mode_config.mutex);
+
+		if (sink) {
+			if (aconnector->dc_sink) {
+				amdgpu_dm_remove_sink_from_freesync_module(
+								connector);
+				/* retain and release below are used for
+				 * bump up refcount for sink because the link doesn't point
+				 * to it anymore after disconnect, so on next crtc to connector
+				 * reshuffle by UMD we will get into unwanted dc_sink release
+				 */
+				if (aconnector->dc_sink != aconnector->dc_em_sink)
+					dc_sink_release(aconnector->dc_sink);
+			}
+			aconnector->dc_sink = sink;
+			amdgpu_dm_add_sink_to_freesync_module(
+						connector, aconnector->edid);
+		} else {
+			amdgpu_dm_remove_sink_from_freesync_module(connector);
+			if (!aconnector->dc_sink)
+				aconnector->dc_sink = aconnector->dc_em_sink;
+			else if (aconnector->dc_sink != aconnector->dc_em_sink)
+				dc_sink_retain(aconnector->dc_sink);
+		}
+
+		mutex_unlock(&dev->mode_config.mutex);
+		return;
+	}
+
+	/*
+	 * TODO: temporary guard to look for proper fix
+	 * if this sink is MST sink, we should not do anything
+	 */
+	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		return;
+
+	if (aconnector->dc_sink == sink) {
+		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
+		 * Do nothing!! */
+		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+				aconnector->connector_id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+		aconnector->connector_id, aconnector->dc_sink, sink);
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/* 1. Update status of the drm connector
+	 * 2. Send an event and let userspace tell us what to do */
+	if (sink) {
+		/* TODO: check if we still need the S3 mode update workaround.
+		 * If yes, put it here. */
+		if (aconnector->dc_sink)
+			amdgpu_dm_remove_sink_from_freesync_module(
+							connector);
+
+		aconnector->dc_sink = sink;
+		if (sink->dc_edid.length == 0) {
+			aconnector->edid = NULL;
+		} else {
+			aconnector->edid =
+				(struct edid *) sink->dc_edid.raw_edid;
+
+
+			drm_mode_connector_update_edid_property(connector,
+					aconnector->edid);
+		}
+		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
+
+	} else {
+		/* disconnected: drop freesync, EDID and modes */
+		amdgpu_dm_remove_sink_from_freesync_module(connector);
+		drm_mode_connector_update_edid_property(connector, NULL);
+		aconnector->num_modes = 0;
+		aconnector->dc_sink = NULL;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+/*
+ * Low-context handler for long-pulse HPD (connect/disconnect): redetect the
+ * link and, on a change, restore the connector state and notify userspace.
+ */
+static void handle_hpd_irq(void *param)
+{
+	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+
+	/* In case of failure or MST no need to update connector status or notify the OS
+	 * since (for MST case) MST does this in it's own context.
+	 */
+	mutex_lock(&aconnector->hpd_lock);
+
+	if (aconnector->fake_enable)
+		aconnector->fake_enable = false;
+
+	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+		amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		/* only notify userspace when the connector is not forced */
+		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_hotplug_event(dev);
+	}
+	mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+/*
+ * Service MST short-pulse interrupts: read the sink-count/ESI DPCD block,
+ * let the MST manager process it, ACK back to the sink, and loop until no
+ * new IRQ is reported (bounded by max_process_count).
+ */
+static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	int dpcd_bytes_to_read;
+
+	const int max_process_count = 30;
+	int process_count = 0;
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	/* pre-DP1.2 sinks use the 0x200 block, DP1.2+ the ESI block */
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	dret = drm_dp_dpcd_read(
+		&aconnector->dm_dp_aux.aux,
+		dpcd_addr,
+		esi,
+		dpcd_bytes_to_read);
+
+	while (dret == dpcd_bytes_to_read &&
+		process_count < max_process_count) {
+		uint8_t retry;
+		dret = 0;
+
+		process_count++;
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+		/* handle HPD short pulse irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq(
+				&aconnector->mst_mgr,
+				esi,
+				&new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			const int ack_dpcd_bytes_to_write =
+				dpcd_bytes_to_read - 1;
+
+			/* the write can fail transiently; retry up to 3x */
+			for (retry = 0; retry < 3; retry++) {
+				uint8_t wret;
+
+				wret = drm_dp_dpcd_write(
+					&aconnector->dm_dp_aux.aux,
+					dpcd_addr + 1,
+					&esi[1],
+					ack_dpcd_bytes_to_write);
+				if (wret == ack_dpcd_bytes_to_write)
+					break;
+			}
+
+			/* check if there is a new irq to be handled */
+			dret = drm_dp_dpcd_read(
+				&aconnector->dm_dp_aux.aux,
+				dpcd_addr,
+				esi,
+				dpcd_bytes_to_read);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+/*
+ * Low-context handler for DP short-pulse HPD (hpd_rx): handles link-loss /
+ * downstream-port changes and dispatches MST ESI processing.
+ */
+static void handle_hpd_rx_irq(void *param)
+{
+	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	struct dc_link *dc_link = aconnector->dc_link;
+	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
+	 * conflict, after implement i2c helper, this mutex should be
+	 * retired.
+	 */
+	if (dc_link->type != dc_connection_mst_branch)
+		mutex_lock(&aconnector->hpd_lock);
+
+	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+			!is_mst_root_connector) {
+		/* Downstream Port status changed. */
+		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
+
+			drm_kms_helper_hotplug_event(dev);
+		}
+	}
+	/* run MST ESI handling when the link is trained or is an MST branch */
+	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+	    (dc_link->type == dc_connection_mst_branch))
+		dm_handle_hpd_rx_irq(aconnector);
+
+	if (dc_link->type != dc_connection_mst_branch)
+		mutex_unlock(&aconnector->hpd_lock);
+}
+
+/* Register HPD and HPD-RX interrupt callbacks for every connector's link. */
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+	struct amdgpu_dm_connector *aconnector;
+	const struct dc_link *dc_link;
+	struct dc_interrupt_params int_params = {0};
+
+	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		dc_link = aconnector->dc_link;
+
+		/* long-pulse HPD (connect/disconnect) */
+		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+			int_params.irq_source = dc_link->irq_source_hpd;
+
+			amdgpu_dm_irq_register_interrupt(adev, &int_params,
+							 handle_hpd_irq,
+							 (void *)aconnector);
+		}
+
+		/* DP short pulse (hpd_rx) */
+		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+			int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+			amdgpu_dm_irq_register_interrupt(adev, &int_params,
+							 handle_hpd_rx_irq,
+							 (void *)aconnector);
+		}
+	}
+}
+
+/* Register IRQ sources and initialize IRQ callbacks (DCE8/10/11/12 parts). */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+	struct dc *dc = adev->dm.dc;
+	struct common_irq_params *c_irq_params;
+	struct dc_interrupt_params int_params = {0};
+	int r;
+	int i;
+	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+
+	/* Vega10/Raven route display interrupts through the DCE IH client */
+	if (adev->asic_type == CHIP_VEGA10 ||
+	    adev->asic_type == CHIP_RAVEN)
+		client_id = AMDGPU_IH_CLIENTID_DCE;
+
+	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+	/* Actions of amdgpu_irq_add_id():
+	 * 1. Register a set() function with base driver.
+	 *    Base driver will call set() function to enable/disable an
+	 *    interrupt in DC hardware.
+	 * 2. Register amdgpu_dm_irq_handler().
+	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+	 *    coming from DC hardware.
+	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+	 *    for acknowledging and handling. */
+
+	/* Use VBLANK interrupt */
+	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+		if (r) {
+			DRM_ERROR("Failed to add crtc irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
+		amdgpu_dm_irq_register_interrupt(adev, &int_params,
+				dm_crtc_high_irq, c_irq_params);
+	}
+
+	/* Use GRPH_PFLIP interrupt; the D1..D6 pflip source IDs are spaced
+	 * two apart, hence the i += 2 stride. */
+	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+		if (r) {
+			DRM_ERROR("Failed to add page flip irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
+		amdgpu_dm_irq_register_interrupt(adev, &int_params,
+				dm_pflip_high_irq, c_irq_params);
+
+	}
+
+	/* HPD */
+	r = amdgpu_irq_add_id(adev, client_id,
+			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+	if (r) {
+		DRM_ERROR("Failed to add hpd irq id!\n");
+		return r;
+	}
+
+	register_hpd_handlers(adev);
+
+	return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+/* Register IRQ sources and initialize IRQ callbacks (DCN 1.0 / Raven). */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+	struct dc *dc = adev->dm.dc;
+	struct common_irq_params *c_irq_params;
+	struct dc_interrupt_params int_params = {0};
+	int r;
+	int i;
+
+	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+	/* Actions of amdgpu_irq_add_id():
+	 * 1. Register a set() function with base driver.
+	 *    Base driver will call set() function to enable/disable an
+	 *    interrupt in DC hardware.
+	 * 2. Register amdgpu_dm_irq_handler().
+	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+	 *    coming from DC hardware.
+	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+	 *    for acknowledging and handling.
+	 * */
+
+	/* Use VSTARTUP interrupt; one consecutive source ID per CRTC */
+	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+			i++) {
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+		if (r) {
+			DRM_ERROR("Failed to add crtc irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
+		amdgpu_dm_irq_register_interrupt(adev, &int_params,
+				dm_crtc_high_irq, c_irq_params);
+	}
+
+	/* Use GRPH_PFLIP interrupt; one consecutive source ID per CRTC */
+	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+			i++) {
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+		if (r) {
+			DRM_ERROR("Failed to add page flip irq id!\n");
+			return r;
+		}
+
+		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+		int_params.irq_source =
+			dc_interrupt_to_irq_source(dc, i, 0);
+
+		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+		c_irq_params->adev = adev;
+		c_irq_params->irq_src = int_params.irq_source;
+
+		amdgpu_dm_irq_register_interrupt(adev, &int_params,
+				dm_pflip_high_irq, c_irq_params);
+
+	}
+
+	/* HPD */
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+			&adev->hpd_irq);
+	if (r) {
+		DRM_ERROR("Failed to add hpd irq id!\n");
+		return r;
+	}
+
+	register_hpd_handlers(adev);
+
+	return 0;
+}
+#endif
+
+/* Set up DRM mode_config defaults (funcs, limits, fb base) for the DM. */
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+
+	adev->mode_info.mode_config_initialized = true;
+
+	ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+	ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+	ddev->mode_config.max_width = 16384;
+	ddev->mode_config.max_height = 16384;
+
+	ddev->mode_config.preferred_depth = 24;
+	ddev->mode_config.prefer_shadow = 1;
+	/* indicate support of immediate flip */
+	ddev->mode_config.async_page_flip = true;
+
+	ddev->mode_config.fb_base = adev->mc.aper_base;
+
+	return amdgpu_modeset_create_props(adev);
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+/* backlight_ops.update_status: push brightness to DC; 0 on success, 1 on failure. */
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+	struct amdgpu_display_manager *dm = bl_get_data(bd);
+	bool applied;
+
+	applied = dc_link_set_backlight_level(dm->backlight_link,
+					      bd->props.brightness, 0, 0);
+	return applied ? 0 : 1;
+}
+
+/* backlight_ops.get_brightness: report the last requested brightness. */
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+	return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+	.get_brightness = amdgpu_dm_backlight_get_brightness,
+	.update_status	= amdgpu_dm_backlight_update_status,
+};
+
+/*
+ * Register an "amdgpu_blN" backlight class device driven through DC.
+ * Registration failure is only logged; the caller does not get an error.
+ */
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+{
+	char bl_name[16];
+	struct backlight_properties props = { 0 };
+
+	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+
+	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+			dm->adev->ddev->primary->index);
+
+	dm->backlight_dev = backlight_device_register(bl_name,
+			dm->adev->ddev->dev,
+			dm,
+			&amdgpu_dm_backlight_ops,
+			&props);
+
+	if (IS_ERR(dm->backlight_dev))
+		DRM_ERROR("DM: Backlight registration failed!\n");
+	else
+		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with DAL component
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_display_manager *dm = &adev->dm;
+	uint32_t i;
+	struct amdgpu_dm_connector *aconnector = NULL;
+	struct amdgpu_encoder *aencoder = NULL;
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	uint32_t link_cnt;
+	unsigned long possible_crtcs;
+
+	link_cnt = dm->dc->caps.max_links;
+	if (amdgpu_dm_mode_config_init(dm->adev)) {
+		DRM_ERROR("DM: Failed to initialize mode config\n");
+		return -1;
+	}
+
+	/* Create one DRM plane per DC plane capability */
+	for (i = 0; i < dm->dc->caps.max_planes; i++) {
+		struct amdgpu_plane *plane;
+
+		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+		mode_info->planes[i] = plane;
+
+		if (!plane) {
+			DRM_ERROR("KMS: Failed to allocate plane\n");
+			goto fail;
+		}
+		plane->base.type = mode_info->plane_type[i];
+
+		/*
+		 * HACK: IGT tests expect that each plane can only have one
+		 * one possible CRTC. For now, set one CRTC for each
+		 * plane that is not an underlay, but still allow multiple
+		 * CRTCs for underlay planes.
+		 */
+		possible_crtcs = 1 << i;
+		if (i >= dm->dc->caps.max_streams)
+			possible_crtcs = 0xff;
+
+		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
+			DRM_ERROR("KMS: Failed to initialize plane\n");
+			goto fail;
+		}
+	}
+
+	/* One CRTC per DC stream, each bound to the matching primary plane */
+	for (i = 0; i < dm->dc->caps.max_streams; i++)
+		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+			DRM_ERROR("KMS: Failed to initialize crtc\n");
+			goto fail;
+		}
+
+	dm->display_indexes_num = dm->dc->caps.max_streams;
+
+	/* loops over all connectors on the board */
+	for (i = 0; i < link_cnt; i++) {
+
+		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+			DRM_ERROR(
+				"KMS: Cannot support more than %d display indexes\n",
+					AMDGPU_DM_MAX_DISPLAY_INDEX);
+			continue;
+		}
+
+		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+		if (!aconnector)
+			goto fail;
+
+		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+		if (!aencoder)
+			goto fail;
+
+		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+			DRM_ERROR("KMS: Failed to initialize encoder\n");
+			goto fail;
+		}
+
+		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+			DRM_ERROR("KMS: Failed to initialize connector\n");
+			goto fail;
+		}
+
+		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
+				DETECT_REASON_BOOT))
+			amdgpu_dm_update_connector_after_detect(aconnector);
+	}
+
+	/* Software is initialized. Now we can register interrupt handlers. */
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS12:
+	case CHIP_VEGA10:
+		if (dce110_register_irq_handlers(dm->adev)) {
+			DRM_ERROR("DM: Failed to initialize IRQ\n");
+			goto fail;
+		}
+		break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	case CHIP_RAVEN:
+		if (dcn10_register_irq_handlers(dm->adev)) {
+			DRM_ERROR("DM: Failed to initialize IRQ\n");
+			goto fail;
+		}
+		/*
+		 * Temporary disable until pplib/smu interaction is implemented
+		 */
+		dm->dc->debug.disable_stutter = true;
+		break;
+#endif
+	default:
+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	kfree(aencoder);
+	kfree(aconnector);
+	for (i = 0; i < dm->dc->caps.max_planes; i++)
+		kfree(mode_info->planes[i]);
+	return -1;
+}
+
+/* Release the DRM mode config created by amdgpu_dm_initialize_drm_device(). */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+	drm_mode_config_cleanup(dm->ddev);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ * Intentionally a stub in the DM path; DC handles bandwidth internally.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+	/* TODO: implement later */
+}
+
+/* amdgpu_display_funcs.backlight_set_level: stub, see TODO below. */
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+				     u8 level)
+{
+	/* TODO: translate amdgpu_encoder to display_index and call DAL */
+}
+
+/* amdgpu_display_funcs.backlight_get_level: stub, always reports 0. */
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+	/* TODO: translate amdgpu_encoder to display_index and call DAL */
+	return 0;
+}
+
+/*
+ * Ioctl handler: push freesync parameters to every active stream.
+ *
+ * TODO: freesync_params is not yet populated from @data (see the comment
+ * below); it is zero-initialized so mod_freesync_update_state() never reads
+ * uninitialized stack memory.
+ */
+static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct mod_freesync_params freesync_params = {0};
+	uint8_t num_streams;
+	uint8_t i;
+
+	struct amdgpu_device *adev = dev->dev_private;
+	int r = 0;
+
+	/* Get freesync enable flag from DRM */
+
+	num_streams = dc_get_current_stream_count(adev->dm.dc);
+
+	for (i = 0; i < num_streams; i++) {
+		struct dc_stream_state *stream;
+
+		stream = dc_get_stream_at_index(adev->dm.dc, i);
+
+		mod_freesync_update_state(adev->dm.freesync_module,
+					  &stream, 1, &freesync_params);
+	}
+
+	return r;
+}
+
+/* Display callbacks exposed to the amdgpu core; NULL entries are handled
+ * by DAL/DC internally (e.g. VBIOS parsing). */
+static const struct amdgpu_display_funcs dm_display_funcs = {
+	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+	.vblank_wait = NULL,
+	.backlight_set_level =
+		dm_set_backlight_level,/* called unconditionally */
+	.backlight_get_level =
+		dm_get_backlight_level,/* called unconditionally */
+	.hpd_sense = NULL,/* called unconditionally */
+	.hpd_set_polarity = NULL, /* called unconditionally */
+	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+	.page_flip_get_scanoutpos =
+		dm_crtc_get_scanoutpos,/* called unconditionally */
+	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
+	.notify_freesync = amdgpu_notify_freesync,
+
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+/*
+ * Debug sysfs attribute: write a non-zero value to simulate resume (power
+ * up DC, restore display state, send hotplug), zero to simulate suspend.
+ */
+static ssize_t s3_debug_store(struct device *device,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t count)
+{
+	int ret;
+	int s3_state;
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = drm_dev->dev_private;
+
+	ret = kstrtoint(buf, 0, &s3_state);
+	if (ret < 0)
+		/* Propagate -EINVAL/-ERANGE; the old "return 0" made
+		 * userspace see a zero-byte write and retry forever. */
+		return ret;
+
+	if (s3_state) {
+		dm_resume(adev);
+		amdgpu_dm_display_resume(adev);
+		drm_kms_helper_hotplug_event(adev->ddev);
+	} else
+		dm_suspend(adev);
+
+	return count;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+/*
+ * IP early_init hook: mark the driver atomic, install IRQ funcs and set the
+ * per-ASIC CRTC/HPD/DIG counts and plane configuration.
+ *
+ * Returns 0 on success, -EINVAL for an unknown ASIC type.
+ */
+static int dm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
+	amdgpu_dm_set_irq_funcs(adev);
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+	case CHIP_KAVERI:
+		adev->mode_info.num_crtc = 4;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 7;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		adev->mode_info.num_crtc = 2;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 7;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+	case CHIP_CARRIZO:
+		adev->mode_info.num_crtc = 3;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 9;
+		adev->mode_info.plane_type = dm_plane_type_carizzo;
+		break;
+	case CHIP_STONEY:
+		adev->mode_info.num_crtc = 2;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 9;
+		adev->mode_info.plane_type = dm_plane_type_stoney;
+		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+		adev->mode_info.num_crtc = 5;
+		adev->mode_info.num_hpd = 5;
+		adev->mode_info.num_dig = 5;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+	case CHIP_POLARIS10:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+	case CHIP_VEGA10:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	case CHIP_RAVEN:
+		adev->mode_info.num_crtc = 4;
+		adev->mode_info.num_hpd = 4;
+		adev->mode_info.num_dig = 4;
+		adev->mode_info.plane_type = dm_plane_type_default;
+		break;
+#endif
+	default:
+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+		return -EINVAL;
+	}
+
+	if (adev->mode_info.funcs == NULL)
+		adev->mode_info.funcs = &dm_display_funcs;
+
+	/* Note: Do NOT change adev->audio_endpt_rreg and
+	 * adev->audio_endpt_wreg because they are initialised in
+	 * amdgpu_device_init() */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+	/* NOTE(review): device_create_file() return value is ignored; a
+	 * failed debug-attribute creation is silent. */
+	device_create_file(
+		adev->ddev->dev,
+		&dev_attr_s3_debug);
+#endif
+
+	return 0;
+}
+
+/* Driver-private connector state: wraps the DRM core connector state and
+ * adds the display manager's scaling and underscan property values. */
+struct dm_connector_state {
+	struct drm_connector_state base;
+
+	enum amdgpu_rmx_type scaling;	/* RMX_OFF/FULL/CENTER/ASPECT */
+	uint8_t underscan_vborder;	/* vertical underscan border, in lines */
+	uint8_t underscan_hborder;	/* horizontal underscan border, in pixels */
+	bool underscan_enable;		/* apply the underscan borders when true */
+};
+
+/* Upcast from the embedded DRM core state to the driver wrapper. */
+#define to_dm_connector_state(x)\
+	container_of((x), struct dm_connector_state, base)
+
+/* A full modeset is required only when the atomic core flagged one and the
+ * CRTC is both enabled and active.  The stream parameters are accepted for
+ * call-site symmetry and are not inspected here. */
+static bool modeset_required(struct drm_crtc_state *crtc_state,
+			     struct dc_stream_state *new_stream,
+			     struct dc_stream_state *old_stream)
+{
+	return drm_atomic_crtc_needs_modeset(crtc_state) &&
+	       crtc_state->enable &&
+	       crtc_state->active;
+}
+
+/* The mode must be torn down when the atomic core flagged a modeset and the
+ * CRTC is being disabled or deactivated. */
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+	return drm_atomic_crtc_needs_modeset(crtc_state) &&
+	       (!crtc_state->enable || !crtc_state->active);
+}
+
+/* drm_encoder_funcs.destroy: detach the encoder from the DRM core and free
+ * its allocation. */
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+	.destroy = amdgpu_dm_encoder_destroy,
+};
+
+/* Translate the DRM plane state rectangles (16.16 fixed-point source,
+ * integer destination) and rotation into the DC plane state.  Returns false
+ * when any dimension is zero; note plane_state may be partially written in
+ * that case, so callers must treat false as a hard failure. */
+static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
+					struct dc_plane_state *plane_state)
+{
+	plane_state->src_rect.x = state->src_x >> 16;
+	plane_state->src_rect.y = state->src_y >> 16;
+	/*we ignore for now mantissa and do not to deal with floating pixels :(*/
+	plane_state->src_rect.width = state->src_w >> 16;
+
+	if (plane_state->src_rect.width == 0)
+		return false;
+
+	plane_state->src_rect.height = state->src_h >> 16;
+	if (plane_state->src_rect.height == 0)
+		return false;
+
+	plane_state->dst_rect.x = state->crtc_x;
+	plane_state->dst_rect.y = state->crtc_y;
+
+	if (state->crtc_w == 0)
+		return false;
+
+	plane_state->dst_rect.width = state->crtc_w;
+
+	if (state->crtc_h == 0)
+		return false;
+
+	plane_state->dst_rect.height = state->crtc_h;
+
+	/* Clip to the whole destination rectangle. */
+	plane_state->clip_rect = plane_state->dst_rect;
+
+	/* Only the four cardinal rotations are representable; anything else
+	 * (including reflections) falls back to no rotation. */
+	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
+	case DRM_MODE_ROTATE_0:
+		plane_state->rotation = ROTATION_ANGLE_0;
+		break;
+	case DRM_MODE_ROTATE_90:
+		plane_state->rotation = ROTATION_ANGLE_90;
+		break;
+	case DRM_MODE_ROTATE_180:
+		plane_state->rotation = ROTATION_ANGLE_180;
+		break;
+	case DRM_MODE_ROTATE_270:
+		plane_state->rotation = ROTATION_ANGLE_270;
+		break;
+	default:
+		plane_state->rotation = ROTATION_ANGLE_0;
+		break;
+	}
+
+	return true;
+}
+/* Reserve the framebuffer's BO and read back its tiling flags and,
+ * optionally, its GPU address.  Either out-pointer may be NULL to skip
+ * that query.  Returns 0 on success or the reservation error. */
+static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+		       uint64_t *tiling_flags,
+		       uint64_t *fb_location)
+{
+	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+	int r = amdgpu_bo_reserve(rbo, false);
+
+	if (unlikely(r)) {
+		// Don't show error msg. when return -ERESTARTSYS
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Unable to reserve buffer: %d\n", r);
+		return r;
+	}
+
+	if (fb_location)
+		*fb_location = amdgpu_bo_gpu_offset(rbo);
+
+	if (tiling_flags)
+		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+	amdgpu_bo_unreserve(rbo);
+
+	return r;
+}
+
+/* Populate a dc_plane_state from an amdgpu framebuffer: pixel format,
+ * surface address (queried only when @addReq is true), plane sizes, and
+ * GFX8/GFX9 tiling parameters.  Returns 0 on success, negative errno on
+ * failure (BO reservation failure or unsupported format). */
+static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+					 struct dc_plane_state *plane_state,
+					 const struct amdgpu_framebuffer *amdgpu_fb,
+					 bool addReq)
+{
+	uint64_t tiling_flags;
+	uint64_t fb_location = 0;
+	uint64_t chroma_addr = 0;
+	unsigned int awidth;
+	const struct drm_framebuffer *fb = &amdgpu_fb->base;
+	int ret = 0;
+	struct drm_format_name_buf format_name;
+
+	ret = get_fb_info(
+		amdgpu_fb,
+		&tiling_flags,
+		addReq == true ? &fb_location:NULL);
+
+	if (ret)
+		return ret;
+
+	/* Map the DRM fourcc to the matching DC surface pixel format. */
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8:
+		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+		break;
+	case DRM_FORMAT_RGB565:
+		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+		break;
+	case DRM_FORMAT_NV21:
+		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+		break;
+	case DRM_FORMAT_NV12:
+		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen format %s\n",
+			  drm_get_format_name(fb->format->format, &format_name));
+		return -EINVAL;
+	}
+
+	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+		/* Packed RGB/indexed formats: a single graphics surface. */
+		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
+		plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
+		plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
+		plane_state->plane_size.grph.surface_size.x = 0;
+		plane_state->plane_size.grph.surface_size.y = 0;
+		plane_state->plane_size.grph.surface_size.width = fb->width;
+		plane_state->plane_size.grph.surface_size.height = fb->height;
+		plane_state->plane_size.grph.surface_pitch =
+				fb->pitches[0] / fb->format->cpp[0];
+		/* TODO: unhardcode */
+		plane_state->color_space = COLOR_SPACE_SRGB;
+
+	} else {
+		/* Semi-planar 4:2:0 video: luma width aligned to 64, chroma
+		 * plane located immediately after the luma plane. */
+		awidth = ALIGN(fb->width, 64);
+		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+		plane_state->address.video_progressive.luma_addr.low_part
+						= lower_32_bits(fb_location);
+		plane_state->address.video_progressive.luma_addr.high_part
+						= upper_32_bits(fb_location);
+		chroma_addr = fb_location + (u64)(awidth * fb->height);
+		plane_state->address.video_progressive.chroma_addr.low_part
+						= lower_32_bits(chroma_addr);
+		plane_state->address.video_progressive.chroma_addr.high_part
+						= upper_32_bits(chroma_addr);
+		plane_state->plane_size.video.luma_size.x = 0;
+		plane_state->plane_size.video.luma_size.y = 0;
+		plane_state->plane_size.video.luma_size.width = awidth;
+		plane_state->plane_size.video.luma_size.height = fb->height;
+		/* TODO: unhardcode */
+		plane_state->plane_size.video.luma_pitch = awidth;
+
+		plane_state->plane_size.video.chroma_size.x = 0;
+		plane_state->plane_size.video.chroma_size.y = 0;
+		plane_state->plane_size.video.chroma_size.width = awidth;
+		plane_state->plane_size.video.chroma_size.height = fb->height;
+		plane_state->plane_size.video.chroma_pitch = awidth / 2;
+
+		/* TODO: unhardcode */
+		plane_state->color_space = COLOR_SPACE_YCBCR709;
+	}
+
+	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
+
+	/* Fill GFX8 params */
+	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
+
+		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+		/* XXX fix me for VI */
+		plane_state->tiling_info.gfx8.num_banks = num_banks;
+		plane_state->tiling_info.gfx8.array_mode =
+				DC_ARRAY_2D_TILED_THIN1;
+		plane_state->tiling_info.gfx8.tile_split = tile_split;
+		plane_state->tiling_info.gfx8.bank_width = bankw;
+		plane_state->tiling_info.gfx8.bank_height = bankh;
+		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
+		plane_state->tiling_info.gfx8.tile_mode =
+				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+			== DC_ARRAY_1D_TILED_THIN1) {
+		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+	}
+
+	plane_state->tiling_info.gfx8.pipe_config =
+			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+	/* GFX9 ASICs use the swizzle-mode based tiling description instead. */
+	if (adev->asic_type == CHIP_VEGA10 ||
+	    adev->asic_type == CHIP_RAVEN) {
+		/* Fill GFX9 params */
+		plane_state->tiling_info.gfx9.num_pipes =
+			adev->gfx.config.gb_addr_config_fields.num_pipes;
+		plane_state->tiling_info.gfx9.num_banks =
+			adev->gfx.config.gb_addr_config_fields.num_banks;
+		plane_state->tiling_info.gfx9.pipe_interleave =
+			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
+		plane_state->tiling_info.gfx9.num_shader_engines =
+			adev->gfx.config.gb_addr_config_fields.num_se;
+		plane_state->tiling_info.gfx9.max_compressed_frags =
+			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
+		plane_state->tiling_info.gfx9.num_rb_per_se =
+			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
+		plane_state->tiling_info.gfx9.swizzle =
+			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
+		plane_state->tiling_info.gfx9.shaderEnable = 1;
+	}
+
+	plane_state->visible = true;
+	plane_state->scaling_quality.h_taps_c = 0;
+	plane_state->scaling_quality.v_taps_c = 0;
+
+	/* is this needed? is plane_state zeroed at allocation? */
+	plane_state->scaling_quality.h_taps = 0;
+	plane_state->scaling_quality.v_taps = 0;
+	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+	return ret;
+
+}
+
+/* Convert the CRTC's DRM gamma LUT into a DC gamma object and attach it to
+ * the plane state.  Silently does nothing (after a WARN) if the DC gamma
+ * allocation fails.
+ * NOTE(review): assumes crtc_state->gamma_lut holds at least
+ * GAMMA_RGB_256_ENTRIES entries — confirm against the property setup. */
+static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
+				       struct dc_plane_state *plane_state)
+{
+	int i;
+	struct dc_gamma *gamma;
+	struct drm_color_lut *lut =
+			(struct drm_color_lut *) crtc_state->gamma_lut->data;
+
+	gamma = dc_create_gamma();
+
+	if (gamma == NULL) {
+		WARN_ON(1);
+		return;
+	}
+
+	gamma->type = GAMMA_RGB_256;
+	gamma->num_entries = GAMMA_RGB_256_ENTRIES;
+	for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
+		gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
+		gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
+		gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
+	}
+
+	plane_state->gamma_correction = gamma;
+}
+
+/* Fill a complete dc_plane_state from DRM plane/CRTC state: rectangles,
+ * framebuffer attributes, an sRGB input transfer function and (if set) the
+ * CRTC gamma LUT.  Returns 0 on success or a negative errno. */
+static int fill_plane_attributes(struct amdgpu_device *adev,
+				 struct dc_plane_state *dc_plane_state,
+				 struct drm_plane_state *plane_state,
+				 struct drm_crtc_state *crtc_state,
+				 bool addrReq)
+{
+	const struct amdgpu_framebuffer *amdgpu_fb =
+		to_amdgpu_framebuffer(plane_state->fb);
+	const struct drm_crtc *crtc = plane_state->crtc;
+	struct dc_transfer_func *input_tf;
+	int ret = 0;
+
+	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+		return -EINVAL;
+
+	ret = fill_plane_attributes_from_fb(
+		crtc->dev->dev_private,
+		dc_plane_state,
+		amdgpu_fb,
+		addrReq);
+
+	if (ret)
+		return ret;
+
+	input_tf = dc_create_transfer_func();
+
+	if (input_tf == NULL)
+		return -ENOMEM;
+
+	input_tf->type = TF_TYPE_PREDEFINED;
+	input_tf->tf = TRANSFER_FUNCTION_SRGB;
+
+	/* NOTE(review): any previously attached in_transfer_func is
+	 * overwritten here without an explicit release — verify ownership
+	 * is handled by the dc_plane_state lifetime. */
+	dc_plane_state->in_transfer_func = input_tf;
+
+	/* In case of gamma set, update gamma value */
+	if (crtc_state->gamma_lut)
+		fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
+
+	return ret;
+}
+
+/*****************************************************************************/
+
+/* Compute the stream's source viewport and destination rectangle from the
+ * requested mode, the connector's scaling property (RMX_*) and its
+ * underscan settings.  The destination is centered on the addressable
+ * area; underscan shrinks it symmetrically. */
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+					   const struct dm_connector_state *dm_state,
+					   struct dc_stream_state *stream)
+{
+	enum amdgpu_rmx_type rmx_type;
+
+	struct rect src = { 0 }; /* viewport in composition space*/
+	struct rect dst = { 0 }; /* stream addressable area */
+
+	/* no mode. nothing to be done */
+	if (!mode)
+		return;
+
+	/* Full screen scaling by default */
+	src.width = mode->hdisplay;
+	src.height = mode->vdisplay;
+	dst.width = stream->timing.h_addressable;
+	dst.height = stream->timing.v_addressable;
+
+	rmx_type = dm_state->scaling;
+	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+		/* Shrink the destination on one axis to preserve the source
+		 * aspect ratio. */
+		if (src.width * dst.height <
+				src.height * dst.width) {
+			/* height needs less upscaling/more downscaling */
+			dst.width = src.width *
+					dst.height / src.height;
+		} else {
+			/* width needs less upscaling/more downscaling */
+			dst.height = src.height *
+					dst.width / src.width;
+		}
+	} else if (rmx_type == RMX_CENTER) {
+		/* 1:1 pixels, centered below. */
+		dst = src;
+	}
+
+	dst.x = (stream->timing.h_addressable - dst.width) / 2;
+	dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+	if (dm_state->underscan_enable) {
+		dst.x += dm_state->underscan_hborder / 2;
+		dst.y += dm_state->underscan_vborder / 2;
+		dst.width -= dm_state->underscan_hborder;
+		dst.height -= dm_state->underscan_vborder;
+	}
+
+	stream->src = src;
+	stream->dst = dst;
+
+	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
+			dst.x, dst.y, dst.width, dst.height);
+
+}
+
+/* Map the connector's reported bits-per-component to a DC color depth.
+ * Depth is currently capped at 8 bpc; a report of 0 (pre-1.4 EDID) also
+ * falls back to 8 bpc. */
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector)
+{
+	uint32_t bpc = connector->display_info.bpc;
+
+	/* Limited color depth to 8bit
+	 * TODO: Still need to handle deep color
+	 */
+	if (bpc > 8)
+		bpc = 8;
+
+	if (bpc == 0) {
+		/* Temporary work around: DRM does not parse color depth for
+		 * EDID revisions before 1.4.
+		 * TODO: Fix edid parsing
+		 */
+		return COLOR_DEPTH_888;
+	}
+	if (bpc == 6)
+		return COLOR_DEPTH_666;
+	if (bpc == 8)
+		return COLOR_DEPTH_888;
+	if (bpc == 10)
+		return COLOR_DEPTH_101010;
+	if (bpc == 12)
+		return COLOR_DEPTH_121212;
+	if (bpc == 14)
+		return COLOR_DEPTH_141414;
+	if (bpc == 16)
+		return COLOR_DEPTH_161616;
+
+	return COLOR_DEPTH_UNDEFINED;
+}
+
+/* Classify a mode as 16:9 or 4:3 by cross-multiplying against 16:9;
+ * a near-zero difference (within +/-10) counts as 16:9. */
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+	int32_t diff = mode_in->crtc_hdisplay * 9 -
+		       mode_in->crtc_vdisplay * 16;
+
+	return (diff > -10 && diff < 10) ? ASPECT_RATIO_16_9 :
+					   ASPECT_RATIO_4_3;
+}
+
+/* Derive the stream's output color space from its pixel encoding: sRGB for
+ * RGB output, YCbCr709/601 (full or limited) for YCbCr output depending on
+ * the pixel clock. */
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+{
+	enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+	switch (dc_crtc_timing->pixel_encoding)	{
+	case PIXEL_ENCODING_YCBCR422:
+	case PIXEL_ENCODING_YCBCR444:
+	case PIXEL_ENCODING_YCBCR420:
+	{
+		/*
+		 * 27030khz is the separation point between HDTV and SDTV
+		 * according to HDMI spec, we use YCbCr709 and YCbCr601
+		 * respectively
+		 */
+		if (dc_crtc_timing->pix_clk_khz > 27030) {
+			if (dc_crtc_timing->flags.Y_ONLY)
+				color_space =
+					COLOR_SPACE_YCBCR709_LIMITED;
+			else
+				color_space = COLOR_SPACE_YCBCR709;
+		} else {
+			if (dc_crtc_timing->flags.Y_ONLY)
+				color_space =
+					COLOR_SPACE_YCBCR601_LIMITED;
+			else
+				color_space = COLOR_SPACE_YCBCR601;
+		}
+
+	}
+	break;
+	case PIXEL_ENCODING_RGB:
+		color_space = COLOR_SPACE_SRGB;
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return color_space;
+}
+
+/*****************************************************************************/
+
+/* Translate a DRM display mode and connector capabilities into the DC CRTC
+ * timing for @stream, then derive the stream's output color space and
+ * install an sRGB output transfer function. */
+static void
+fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+					     const struct drm_display_mode *mode_in,
+					     const struct drm_connector *connector)
+{
+	struct dc_crtc_timing *timing_out = &stream->timing;
+
+	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+	timing_out->h_border_left = 0;
+	timing_out->h_border_right = 0;
+	timing_out->v_border_top = 0;
+	timing_out->v_border_bottom = 0;
+	/* TODO: un-hardcode */
+
+	/* Prefer YCbCr 4:4:4 on HDMI sinks that advertise it. */
+	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+	else
+		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+	timing_out->display_color_depth = convert_color_depth_from_display_info(
+			connector);
+	timing_out->scan_type = SCANNING_TYPE_NODATA;
+	timing_out->hdmi_vic = 0;
+	timing_out->vic = drm_match_cea_mode(mode_in);
+
+	/* Horizontal/vertical timing from the crtc_* (hardware) fields. */
+	timing_out->h_addressable = mode_in->crtc_hdisplay;
+	timing_out->h_total = mode_in->crtc_htotal;
+	timing_out->h_sync_width =
+		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+	timing_out->h_front_porch =
+		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+	timing_out->v_total = mode_in->crtc_vtotal;
+	timing_out->v_addressable = mode_in->crtc_vdisplay;
+	timing_out->v_front_porch =
+		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+	timing_out->v_sync_width =
+		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+	timing_out->pix_clk_khz = mode_in->crtc_clock;
+	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+
+	stream->output_color_space = get_output_color_space(timing_out);
+
+	{
+		struct dc_transfer_func *tf = dc_create_transfer_func();
+
+		/* dc_create_transfer_func() allocates and may fail; skip
+		 * installing the output transfer function rather than
+		 * dereferencing a NULL pointer. */
+		if (tf) {
+			tf->type = TF_TYPE_PREDEFINED;
+			tf->tf = TRANSFER_FUNCTION_SRGB;
+			stream->out_transfer_func = tf;
+		}
+	}
+}
+
+/* Copy audio capabilities (display name, CEA audio modes, speaker flags,
+ * latency) from the sink's parsed EDID caps into the DC audio_info. */
+static void fill_audio_info(struct audio_info *audio_info,
+			    const struct drm_connector *drm_connector,
+			    const struct dc_sink *dc_sink)
+{
+	int i = 0;
+	int cea_revision = 0;
+	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+	audio_info->manufacture_id = edid_caps->manufacturer_id;
+	audio_info->product_id = edid_caps->product_id;
+
+	cea_revision = drm_connector->display_info.cea_rev;
+
+	strncpy(audio_info->display_name,
+		edid_caps->display_name,
+		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+	/* strncpy() leaves the buffer unterminated when the source fills
+	 * it; terminate explicitly (CERT STR32-C). */
+	audio_info->display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1] = '\0';
+
+	/* Audio modes are only described by CEA revision 3 and later. */
+	if (cea_revision >= 3) {
+		audio_info->mode_count = edid_caps->audio_mode_count;
+
+		for (i = 0; i < audio_info->mode_count; ++i) {
+			audio_info->modes[i].format_code =
+					(enum audio_format_code)
+					(edid_caps->audio_modes[i].format_code);
+			audio_info->modes[i].channel_count =
+					edid_caps->audio_modes[i].channel_count;
+			audio_info->modes[i].sample_rates.all =
+					edid_caps->audio_modes[i].sample_rate;
+			audio_info->modes[i].sample_size =
+					edid_caps->audio_modes[i].sample_size;
+		}
+	}
+
+	audio_info->flags.all = edid_caps->speaker_flags;
+
+	/* TODO: We only check for the progressive mode, check for interlace mode too */
+	if (drm_connector->latency_present[0]) {
+		audio_info->video_latency = drm_connector->video_latency[0];
+		audio_info->audio_latency = drm_connector->audio_latency[0];
+	}
+
+	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+/* Copy only the hardware (crtc_*) timing fields from @src_mode into
+ * @dst_mode, leaving the user-visible mode fields untouched. */
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+				      struct drm_display_mode *dst_mode)
+{
+	dst_mode->crtc_clock = src_mode->crtc_clock;
+
+	/* horizontal timing */
+	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+	dst_mode->crtc_htotal = src_mode->crtc_htotal;
+	dst_mode->crtc_hskew = src_mode->crtc_hskew;
+
+	/* vertical timing */
+	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+/* Adopt the panel's native hardware timing when scaling is enabled, or when
+ * the requested mode already matches the native clock/htotal/vtotal; with
+ * neither condition there is nothing to patch. */
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+					const struct drm_display_mode *native_mode,
+					bool scale_enabled)
+{
+	bool same_timing = native_mode->clock == drm_mode->clock &&
+			   native_mode->htotal == drm_mode->htotal &&
+			   native_mode->vtotal == drm_mode->vtotal;
+
+	if (scale_enabled || same_timing)
+		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+}
+
+/* Create a virtual DC sink on the connector's link so a stream can still be
+ * built when no physical sink is attached.  Marks the connector as
+ * fake-enabled and installs the sink as the link's local sink.
+ * Returns 0 on success, -ENOMEM on allocation failure. */
+static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_sink *sink = NULL;
+	struct dc_sink_init_data sink_init_data = { 0 };
+
+	sink_init_data.link = aconnector->dc_link;
+	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+	sink = dc_sink_create(&sink_init_data);
+	if (!sink) {
+		DRM_ERROR("Failed to create sink!\n");
+		return -ENOMEM;
+	}
+
+	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+	aconnector->fake_enable = true;
+
+	aconnector->dc_sink = sink;
+	aconnector->dc_link->local_sink = sink;
+
+	return 0;
+}
+
+/* Build a dc_stream_state for the connector's sink from the requested mode
+ * and connector state.  Creates a fake sink for non-MST connectors with no
+ * physical sink, patches the CRTC timing against the preferred (native)
+ * mode, and fills scaling and audio info.  Returns NULL on failure. */
+static struct dc_stream_state *
+create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+		       const struct drm_display_mode *drm_mode,
+		       const struct dm_connector_state *dm_state)
+{
+	struct drm_display_mode *preferred_mode = NULL;
+	const struct drm_connector *drm_connector;
+	struct dc_stream_state *stream = NULL;
+	struct drm_display_mode mode = *drm_mode;
+	bool native_mode_found = false;
+
+	if (aconnector == NULL) {
+		DRM_ERROR("aconnector is NULL!\n");
+		goto drm_connector_null;
+	}
+
+	if (dm_state == NULL) {
+		DRM_ERROR("dm_state is NULL!\n");
+		goto dm_state_null;
+	}
+
+	drm_connector = &aconnector->base;
+
+	if (!aconnector->dc_sink) {
+		/*
+		 * Exclude MST from creating fake_sink
+		 * TODO: need to enable MST into fake_sink feature
+		 */
+		if (aconnector->mst_port)
+			goto stream_create_fail;
+
+		if (create_fake_sink(aconnector))
+			goto stream_create_fail;
+	}
+
+	stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+	if (stream == NULL) {
+		DRM_ERROR("Failed to create stream for sink!\n");
+		goto stream_create_fail;
+	}
+
+	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+		/* Search for preferred mode */
+		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+			native_mode_found = true;
+			break;
+		}
+	}
+	if (!native_mode_found)
+		preferred_mode = list_first_entry_or_null(
+				&aconnector->base.modes,
+				struct drm_display_mode,
+				head);
+
+	if (preferred_mode == NULL) {
+		/* This may not be an error, the use case is when we have no
+		 * usermode calls to reset and set mode upon hotplug. In this
+		 * case, we call set mode ourselves to restore the previous mode
+		 * and the modelist may not be filled in in time.
+		 */
+		DRM_DEBUG_DRIVER("No preferred mode found\n");
+	} else {
+		decide_crtc_timing_for_drm_display_mode(
+				&mode, preferred_mode,
+				dm_state->scaling != RMX_OFF);
+	}
+
+	fill_stream_properties_from_drm_display_mode(stream,
+			&mode, &aconnector->base);
+	update_stream_scaling_settings(&mode, dm_state, stream);
+
+	fill_audio_info(
+		&stream->audio_info,
+		drm_connector,
+		aconnector->dc_sink);
+
+	/* All failure labels fall through to the same return; stream is NULL
+	 * on every error path that reaches them. */
+stream_create_fail:
+dm_state_null:
+drm_connector_null:
+	return stream;
+}
+
+/* drm_crtc_funcs.destroy: detach the CRTC from the DRM core and free it. */
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+/* drm_crtc_funcs.atomic_destroy_state: drop the dc_stream reference held by
+ * the state wrapper, release the core state, and free the wrapper. */
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+				  struct drm_crtc_state *state)
+{
+	struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+	/* TODO Destroy dc_stream objects are stream object is flattened */
+	if (cur->stream)
+		dc_stream_release(cur->stream);
+
+
+	__drm_atomic_helper_crtc_destroy_state(state);
+
+
+	kfree(state);
+}
+
+/* drm_crtc_funcs.reset: destroy any existing state and install a fresh
+ * zeroed dm_crtc_state (no stream attached). */
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+	struct dm_crtc_state *state;
+
+	if (crtc->state)
+		dm_crtc_destroy_state(crtc, crtc->state);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (WARN_ON(!state))
+		return;
+
+	crtc->state = &state->base;
+	crtc->state->crtc = crtc;
+
+}
+
+/* drm_crtc_funcs.atomic_duplicate_state: clone the current CRTC state,
+ * sharing the attached dc_stream via an extra reference. */
+static struct drm_crtc_state *
+dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct dm_crtc_state *state, *cur;
+
+	/* Validate crtc->state before deriving the wrapper from it; the
+	 * previous code computed 'cur' first, handing container_of() a NULL
+	 * pointer when no state was installed. */
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	cur = to_dm_crtc_state(crtc->state);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+	/* Take a reference so the stream survives whichever state is
+	 * destroyed first. */
+	if (cur->stream) {
+		state->stream = cur->stream;
+		dc_stream_retain(state->stream);
+	}
+
+	/* TODO Duplicate dc_stream after objects are stream object is flattened */
+
+	return &state->base;
+}
+
+/* Implemented only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+	.reset = dm_crtc_reset_state,
+	.destroy = amdgpu_dm_crtc_destroy,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = dm_crtc_duplicate_state,
+	.atomic_destroy_state = dm_crtc_destroy_state,
+};
+
+/* drm_connector_funcs.detect: report connected when a DC sink exists, unless
+ * the user forced the connector state (or it is fake-enabled), in which case
+ * the forced value wins. */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+	bool connected;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	/* Notes:
+	 * 1. This interface is NOT called in context of HPD irq.
+	 * 2. This interface *is called* in context of user-mode ioctl. Which
+	 * makes it a bad place for *any* MST-related activity. */
+
+	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+	    !aconnector->fake_enable)
+		connected = (aconnector->dc_sink != NULL);
+	else
+		connected = (aconnector->base.force == DRM_FORCE_ON);
+
+	return (connected ? connector_status_connected :
+			connector_status_disconnected);
+}
+
+/* drm_connector_funcs.atomic_set_property: store the scaling mode or
+ * underscan settings into the new connector state.  Returns 0 when the
+ * property was handled, -EINVAL for unknown properties. */
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+					    struct drm_connector_state *connector_state,
+					    struct drm_property *property,
+					    uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dm_connector_state *dm_old_state =
+		to_dm_connector_state(connector->state);
+	struct dm_connector_state *dm_new_state =
+		to_dm_connector_state(connector_state);
+
+	int ret = -EINVAL;
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		enum amdgpu_rmx_type rmx_type;
+
+		/* Map the DRM scaling enum onto the amdgpu RMX type. */
+		switch (val) {
+		case DRM_MODE_SCALE_CENTER:
+			rmx_type = RMX_CENTER;
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			rmx_type = RMX_ASPECT;
+			break;
+		case DRM_MODE_SCALE_FULLSCREEN:
+			rmx_type = RMX_FULL;
+			break;
+		case DRM_MODE_SCALE_NONE:
+		default:
+			rmx_type = RMX_OFF;
+			break;
+		}
+
+		/* No-op if the scaling mode did not change. */
+		if (dm_old_state->scaling == rmx_type)
+			return 0;
+
+		dm_new_state->scaling = rmx_type;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_hborder_property) {
+		dm_new_state->underscan_hborder = val;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_vborder_property) {
+		dm_new_state->underscan_vborder = val;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_property) {
+		dm_new_state->underscan_enable = val;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* drm_connector_funcs.atomic_get_property: read the scaling mode or
+ * underscan settings back out of the connector state.  Returns 0 when the
+ * property was handled, -EINVAL for unknown properties. */
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+					    const struct drm_connector_state *state,
+					    struct drm_property *property,
+					    uint64_t *val)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dm_connector_state *dm_state =
+		to_dm_connector_state(state);
+	int ret = -EINVAL;
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		/* Inverse of the mapping in atomic_set_property. */
+		switch (dm_state->scaling) {
+		case RMX_CENTER:
+			*val = DRM_MODE_SCALE_CENTER;
+			break;
+		case RMX_ASPECT:
+			*val = DRM_MODE_SCALE_ASPECT;
+			break;
+		case RMX_FULL:
+			*val = DRM_MODE_SCALE_FULLSCREEN;
+			break;
+		case RMX_OFF:
+		default:
+			*val = DRM_MODE_SCALE_NONE;
+			break;
+		}
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_hborder_property) {
+		*val = dm_state->underscan_hborder;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_vborder_property) {
+		*val = dm_state->underscan_vborder;
+		ret = 0;
+	} else if (property == adev->mode_info.underscan_property) {
+		*val = dm_state->underscan_enable;
+		ret = 0;
+	}
+	return ret;
+}
+
+/* drm_connector_funcs.destroy: tear down the connector; for eDP/LVDS links
+ * also unregister the backlight device the DM created for them. */
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	const struct dc_link *link = aconnector->dc_link;
+	struct amdgpu_device *adev = connector->dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+		amdgpu_dm_register_backlight_device(dm);
+
+		if (dm->backlight_dev) {
+			backlight_device_unregister(dm->backlight_dev);
+			dm->backlight_dev = NULL;
+		}
+
+	}
+#endif
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+/* drm_connector_funcs.reset: discard the current connector state and
+ * install a fresh one with scaling and underscan disabled.
+ * NOTE(review): the old wrapper is kfree'd directly rather than going
+ * through __drm_atomic_helper_connector_destroy_state() — verify no core
+ * references are held by the old state at reset time. */
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+	struct dm_connector_state *state =
+		to_dm_connector_state(connector->state);
+
+	kfree(state);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (state) {
+		state->scaling = RMX_OFF;
+		state->underscan_enable = false;
+		state->underscan_hborder = 0;
+		state->underscan_vborder = 0;
+
+		connector->state = &state->base;
+		connector->state->connector = connector;
+	}
+}
+
+/* drm_connector_funcs.atomic_duplicate_state: clone the connector state
+ * wrapper (kmemdup copies the DM-private fields) and let the DRM helper fix
+ * up the embedded core state.  Returns NULL on allocation failure. */
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+	struct dm_connector_state *state =
+		to_dm_connector_state(connector->state);
+
+	struct dm_connector_state *new_state =
+			kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+	if (new_state) {
+		__drm_atomic_helper_connector_duplicate_state(connector,
+							      &new_state->base);
+		return &new_state->base;
+	}
+
+	return NULL;
+}
+
+/* DRM connector vtable for DM-managed connectors. */
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+	.reset = amdgpu_dm_connector_funcs_reset,
+	.detect = amdgpu_dm_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = amdgpu_dm_connector_destroy,
+	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+/* drm_connector_helper_funcs.best_encoder: resolve the connector's first
+ * attached encoder id to its encoder object, or NULL if none is attached. */
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	DRM_DEBUG_DRIVER("Finding the best encoder\n");
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj) {
+			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+			return NULL;
+		}
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	DRM_ERROR("No encoder id\n");
+	return NULL;
+}
+
+/* drm_connector_helper_funcs.get_modes: thin wrapper over the DM probe. */
+static int get_modes(struct drm_connector *connector)
+{
+	return amdgpu_dm_connector_get_modes(connector);
+}
+
+/* Build an emulated (EDID-backed) remote sink for a connector that was
+ * forced on, using the EDID firmware blob attached to the connector.  If no
+ * EDID blob is present the connector is forced OFF instead. */
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_sink_init_data init_params = {
+			.link = aconnector->dc_link,
+			.sink_signal = SIGNAL_TYPE_VIRTUAL
+	};
+	struct edid *edid;
+
+	/* Check the blob before dereferencing it: the previous code read
+	 * edid_blob_ptr->data in the initializer, ahead of this NULL check,
+	 * which oopses on connectors forced on without EDID firmware. */
+	if (!aconnector->base.edid_blob_ptr ||
+		!aconnector->base.edid_blob_ptr->data) {
+		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
+				aconnector->base.name);
+
+		aconnector->base.force = DRM_FORCE_OFF;
+		aconnector->base.override_edid = false;
+		return;
+	}
+
+	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
+	aconnector->edid = edid;
+
+	/* Register the EDID (base block + extensions) as a remote sink. */
+	aconnector->dc_em_sink = dc_link_add_remote_sink(
+		aconnector->dc_link,
+		(uint8_t *)edid,
+		(edid->extensions + 1) * EDID_LENGTH,
+		&init_params);
+
+	if (aconnector->base.force == DRM_FORCE_ON)
+		aconnector->dc_sink = aconnector->dc_link->local_sink ?
+		aconnector->dc_link->local_sink :
+		aconnector->dc_em_sink;
+}
+
+/* Set up EDID emulation for a user-forced connector: seed non-zero link
+ * capabilities for DP (needed to get an initial modeset on headless boot)
+ * and create the emulated sink from the EDID firmware blob. */
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+	/* In case of headless boot with force on for DP managed connector
+	 * Those settings have to be != 0 to get initial modeset
+	 */
+	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+	}
+
+
+	aconnector->base.override_edid = true;
+	create_eml_sink(aconnector);
+}
+
+/* drm_connector_helper_funcs.mode_valid: accept a mode only if DC validates
+ * a stream built from it against the connector's sink.  Interlaced and
+ * doublescan modes are rejected outright. */
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+				   struct drm_display_mode *mode)
+{
+	int result = MODE_ERROR;
+	struct dc_sink *dc_sink;
+	struct amdgpu_device *adev = connector->dev->dev_private;
+	/* TODO: Unhardcode stream count */
+	struct dc_stream_state *stream;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
+		return result;
+
+	/* Only run this the first time mode_valid is called to initialize
+	 * EDID mgmt
+	 */
+	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+		!aconnector->dc_em_sink)
+		handle_edid_mgmt(aconnector);
+
+	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+	if (dc_sink == NULL) {
+		DRM_ERROR("dc_sink is NULL!\n");
+		goto fail;
+	}
+
+	/* Build a throwaway stream just to ask DC whether the mode fits. */
+	stream = dc_create_stream_for_sink(dc_sink);
+	if (stream == NULL) {
+		DRM_ERROR("Failed to create stream for sink!\n");
+		goto fail;
+	}
+
+	drm_mode_set_crtcinfo(mode, 0);
+	fill_stream_properties_from_drm_display_mode(stream, mode, connector);
+
+	stream->src.width = mode->hdisplay;
+	stream->src.height = mode->vdisplay;
+	stream->dst = stream->src;
+
+	if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
+		result = MODE_OK;
+
+	dc_stream_release(stream);
+
+fail:
+	/* TODO: error handling*/
+	return result;
+}
+
+/* Connector helper vtable wired up in amdgpu_dm_connector_init(). */
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+	/*
+	 * If hotplug a second bigger display in FB Con mode, bigger resolution
+	 * modes will be filtered by drm_mode_validate_size(), and those modes
+	 * is missing after user start lightdm. So we need to renew modes list.
+	 * in get_modes call back, not just return the modes count
+	 */
+	.get_modes = get_modes,
+	.mode_valid = amdgpu_dm_connector_mode_valid,
+	.best_encoder = best_encoder
+};
+
+/* Intentional no-op: CRTC disable is handled through the DC atomic commit
+ * path, but the helper framework requires a .disable callback to exist.
+ */
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+/* drm_crtc_helper_funcs.atomic_check hook: validate the DC stream attached
+ * to the new CRTC state.  Returns 0 when the state is acceptable (including
+ * the legitimate stream-less case, e.g. during reset), -EINVAL otherwise.
+ */
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+				       struct drm_crtc_state *state)
+{
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+	int ret = -EINVAL;
+
+	/* A modeset that requires a stream but has none is a driver bug */
+	if (unlikely(!dm_crtc_state->stream &&
+		     modeset_required(state, NULL, dm_crtc_state->stream))) {
+		WARN_ON(1);
+		return ret;
+	}
+
+	/* In some use cases, like reset, no stream  is attached */
+	if (!dm_crtc_state->stream)
+		return 0;
+
+	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+		return 0;
+
+	return ret;
+}
+
+/* No fixup needed: DC consumes the mode as-is, so always accept it. */
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+/* CRTC helper vtable registered in amdgpu_dm_crtc_init(). */
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+	.disable = dm_crtc_helper_disable,
+	.atomic_check = dm_crtc_helper_atomic_check,
+	.mode_fixup = dm_crtc_helper_mode_fixup
+};
+
+/* Intentional no-op: encoder power-down is driven by DC, but the atomic
+ * helpers expect a .disable callback.
+ */
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+
+}
+
+/* Nothing to validate at encoder level; DC checks the stream instead. */
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+					  struct drm_crtc_state *crtc_state,
+					  struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+/* Encoder helper vtable registered in amdgpu_dm_encoder_init(). */
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+	.disable = dm_encoder_helper_disable,
+	.atomic_check = dm_encoder_helper_atomic_check
+};
+
+/* drm_plane_funcs.reset hook: free any existing plane state and install a
+ * fresh zeroed dm_plane_state with default rotation.
+ * NOTE(review): if the kzalloc fails, plane->state is left NULL after the
+ * old state was destroyed — callers only get a WARN_ON; confirm this is an
+ * acceptable degradation at init time.
+ */
+static void dm_drm_plane_reset(struct drm_plane *plane)
+{
+	struct dm_plane_state *amdgpu_state = NULL;
+
+	if (plane->state)
+		plane->funcs->atomic_destroy_state(plane, plane->state);
+
+	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+	WARN_ON(amdgpu_state == NULL);
+	
+	if (amdgpu_state) {
+		plane->state = &amdgpu_state->base;
+		plane->state->plane = plane;
+		plane->state->rotation = DRM_MODE_ROTATE_0;
+	}
+}
+
+/* drm_plane_funcs.atomic_duplicate_state hook: copy the base plane state and
+ * share the DC plane state by taking an extra reference (released in
+ * dm_drm_plane_destroy_state).  Returns NULL on allocation failure.
+ */
+static struct drm_plane_state *
+dm_drm_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
+
+	old_dm_plane_state = to_dm_plane_state(plane->state);
+	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+	if (!dm_plane_state)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+
+	if (old_dm_plane_state->dc_state) {
+		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
+		dc_plane_state_retain(dm_plane_state->dc_state);
+	}
+
+	return &dm_plane_state->base;
+}
+
+/* drm_plane_funcs.atomic_destroy_state hook: drop our reference on the DC
+ * plane state (taken in duplicate_state/reset), then free the base state.
+ */
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+				struct drm_plane_state *state)
+{
+	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+	if (dm_plane_state->dc_state)
+		dc_plane_state_release(dm_plane_state->dc_state);
+
+	drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+/* Plane vtable: generic atomic helpers for update/disable, DM-specific
+ * state management to carry the DC plane state alongside the DRM one.
+ */
+static const struct drm_plane_funcs dm_plane_funcs = {
+	.update_plane	= drm_atomic_helper_update_plane,
+	.disable_plane	= drm_atomic_helper_disable_plane,
+	.destroy	= drm_plane_cleanup,
+	.reset = dm_drm_plane_reset,
+	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
+	.atomic_destroy_state = dm_drm_plane_destroy_state,
+};
+
+/* drm_plane_helper_funcs.prepare_fb hook: pin the framebuffer BO into VRAM,
+ * take a reference on it, and program the resulting GPU address into the new
+ * DC plane state (separate luma/chroma addresses for video formats).
+ * Returns 0 on success or the amdgpu_bo_reserve/pin errno.
+ */
+static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+				      struct drm_plane_state *new_state)
+{
+	struct amdgpu_framebuffer *afb;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *rbo;
+	uint64_t chroma_addr = 0;
+	int r;
+	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+	unsigned int awidth;
+
+	dm_plane_state_old = to_dm_plane_state(plane->state);
+	dm_plane_state_new = to_dm_plane_state(new_state);
+
+	if (!new_state->fb) {
+		DRM_DEBUG_DRIVER("No FB bound\n");
+		return 0;
+	}
+
+	afb = to_amdgpu_framebuffer(new_state->fb);
+
+	obj = afb->obj;
+	rbo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	/* Pin fills afb->address with the VRAM GPU address */
+	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
+
+	amdgpu_bo_unreserve(rbo);
+
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+		return r;
+	}
+
+	/* Hold a reference while the FB is in use; dropped in cleanup_fb */
+	amdgpu_bo_ref(rbo);
+
+	/* Only (re)program addresses when this commit carries a new DC state */
+	if (dm_plane_state_new->dc_state &&
+			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+
+		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
+			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
+		} else {
+			/* Video formats: chroma plane follows the 64-byte
+			 * aligned luma plane in the same BO
+			 */
+			awidth = ALIGN(new_state->fb->width, 64);
+			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+			plane_state->address.video_progressive.luma_addr.low_part
+							= lower_32_bits(afb->address);
+			plane_state->address.video_progressive.luma_addr.high_part
+							= upper_32_bits(afb->address);
+			chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
+			plane_state->address.video_progressive.chroma_addr.low_part
+							= lower_32_bits(chroma_addr);
+			plane_state->address.video_progressive.chroma_addr.high_part
+							= upper_32_bits(chroma_addr);
+		}
+	}
+
+	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
+	 * prepare and cleanup in drm_atomic_helper_prepare_planes
+	 * and drm_atomic_helper_cleanup_planes because fb doesn't exist in s3.
+	 * In 4.10 kernel this code should be removed and amdgpu_device_suspend
+	 * code touching frame buffers should be avoided for DC.
+	 */
+	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+
+		acrtc->cursor_bo = obj;
+	}
+	return 0;
+}
+
+/* drm_plane_helper_funcs.cleanup_fb hook: undo prepare_fb — unpin the old
+ * framebuffer's BO and drop the reference taken when it was prepared.
+ */
+static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
+				       struct drm_plane_state *old_state)
+{
+	struct amdgpu_bo *rbo;
+	struct amdgpu_framebuffer *afb;
+	int r;
+
+	if (!old_state->fb)
+		return;
+
+	afb = to_amdgpu_framebuffer(old_state->fb);
+	rbo = gem_to_amdgpu_bo(afb->obj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(r)) {
+		DRM_ERROR("failed to reserve rbo before unpin\n");
+		return;
+	}
+
+	amdgpu_bo_unpin(rbo);
+	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_unref(&rbo);
+}
+
+/* drm_plane_helper_funcs.atomic_check hook: let DC validate the plane state.
+ * A state with no DC plane state attached (e.g. disabled plane) is accepted.
+ */
+static int dm_plane_atomic_check(struct drm_plane *plane,
+				 struct drm_plane_state *state)
+{
+	struct amdgpu_device *adev = plane->dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+	if (!dm_plane_state->dc_state)
+		return 0;
+
+	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
+		return 0;
+
+	return -EINVAL;
+}
+
+/* Plane helper vtable registered in amdgpu_dm_plane_init(). */
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+	.prepare_fb = dm_plane_helper_prepare_fb,
+	.cleanup_fb = dm_plane_helper_cleanup_fb,
+	.atomic_check = dm_plane_atomic_check,
+};
+
+/*
+ * TODO: these are currently initialized to rgb formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so internal drm
+ * check will succeed, and let DC to implement proper check
+ */
+static const uint32_t rgb_formats[] = {
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
+};
+
+/* Semi-planar YUV formats advertised on overlay planes */
+static const uint32_t yuv_formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+};
+
+/* The cursor plane only supports pre-multiplied ARGB */
+static const u32 cursor_formats[] = {
+	DRM_FORMAT_ARGB8888
+};
+
+/* Register @aplane with the DRM core, choosing the advertised format list
+ * by plane type (RGB for primary, YUV for overlay, ARGB for cursor), then
+ * attach the DM plane helpers and create the initial plane state.
+ * Returns the drm_universal_plane_init() result, or -EPERM for an
+ * unrecognized plane type.
+ */
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+				struct amdgpu_plane *aplane,
+				unsigned long possible_crtcs)
+{
+	int res = -EPERM;
+
+	switch (aplane->base.type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		aplane->base.format_default = true;
+
+		res = drm_universal_plane_init(
+				dm->adev->ddev,
+				&aplane->base,
+				possible_crtcs,
+				&dm_plane_funcs,
+				rgb_formats,
+				ARRAY_SIZE(rgb_formats),
+				NULL, aplane->base.type, NULL);
+		break;
+	case DRM_PLANE_TYPE_OVERLAY:
+		res = drm_universal_plane_init(
+				dm->adev->ddev,
+				&aplane->base,
+				possible_crtcs,
+				&dm_plane_funcs,
+				yuv_formats,
+				ARRAY_SIZE(yuv_formats),
+				NULL, aplane->base.type, NULL);
+		break;
+	case DRM_PLANE_TYPE_CURSOR:
+		res = drm_universal_plane_init(
+				dm->adev->ddev,
+				&aplane->base,
+				possible_crtcs,
+				&dm_plane_funcs,
+				cursor_formats,
+				ARRAY_SIZE(cursor_formats),
+				NULL, aplane->base.type, NULL);
+		break;
+	}
+
+	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+
+	/* Create (reset) the plane state */
+	if (aplane->base.funcs->reset)
+		aplane->base.funcs->reset(&aplane->base);
+
+
+	return res;
+}
+
+/* Allocate and register one DRM CRTC together with its dedicated cursor
+ * plane.  @plane is the already-initialized primary plane for this CRTC.
+ * Returns 0 on success or a negative errno; on failure all local
+ * allocations are freed.
+ */
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+			       struct drm_plane *plane,
+			       uint32_t crtc_index)
+{
+	struct amdgpu_crtc *acrtc = NULL;
+	struct amdgpu_plane *cursor_plane;
+
+	int res = -ENOMEM;
+
+	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+	if (!cursor_plane)
+		goto fail;
+
+	cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+	/* The original code ignored this result and continued with a
+	 * half-initialized cursor plane; bail out instead.
+	 */
+	if (res)
+		goto fail;
+
+	/* Re-arm the errno so a failed kzalloc below cannot return a stale 0 */
+	res = -ENOMEM;
+	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+	if (!acrtc)
+		goto fail;
+
+	res = drm_crtc_init_with_planes(
+			dm->ddev,
+			&acrtc->base,
+			plane,
+			&cursor_plane->base,
+			&amdgpu_dm_crtc_funcs, NULL);
+
+	if (res)
+		goto fail;
+
+	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+	/* Create (reset) the plane state */
+	if (acrtc->base.funcs->reset)
+		acrtc->base.funcs->reset(&acrtc->base);
+
+	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
+	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
+
+	acrtc->crtc_id = crtc_index;
+	acrtc->base.enabled = false;
+
+	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+	return 0;
+
+fail:
+	/* kfree(NULL) is a no-op, so partially-reached paths are safe */
+	kfree(acrtc);
+	kfree(cursor_plane);
+	return res;
+}
+
+
+/* Translate a DC signal type into the matching DRM connector type.
+ * Unrecognized signals map to DRM_MODE_CONNECTOR_Unknown.
+ */
+static int to_drm_connector_type(enum signal_type st)
+{
+	if (st == SIGNAL_TYPE_HDMI_TYPE_A)
+		return DRM_MODE_CONNECTOR_HDMIA;
+	if (st == SIGNAL_TYPE_EDP)
+		return DRM_MODE_CONNECTOR_eDP;
+	if (st == SIGNAL_TYPE_RGB)
+		return DRM_MODE_CONNECTOR_VGA;
+	/* SST and MST both present as DisplayPort to userspace */
+	if (st == SIGNAL_TYPE_DISPLAY_PORT ||
+	    st == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		return DRM_MODE_CONNECTOR_DisplayPort;
+	/* Single- and dual-link DVI share one connector type */
+	if (st == SIGNAL_TYPE_DVI_DUAL_LINK ||
+	    st == SIGNAL_TYPE_DVI_SINGLE_LINK)
+		return DRM_MODE_CONNECTOR_DVID;
+	if (st == SIGNAL_TYPE_VIRTUAL)
+		return DRM_MODE_CONNECTOR_VIRTUAL;
+
+	return DRM_MODE_CONNECTOR_Unknown;
+}
+
+/* Cache the connector's native mode on its encoder, for later use when
+ * synthesizing common scaled modes.  The native-mode clock is zeroed first so
+ * a connector without a preferred mode ends up with no native mode.
+ * NOTE(review): the loop body ends in an unconditional break, so only the
+ * FIRST probed mode is ever inspected — confirm that is intentional (it
+ * relies on the preferred mode being sorted first).
+ */
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+	const struct drm_connector_helper_funcs *helper =
+		connector->helper_private;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	encoder = helper->best_encoder(connector);
+
+	if (encoder == NULL)
+		return;
+
+	amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	amdgpu_encoder->native_mode.clock = 0;
+
+	if (!list_empty(&connector->probed_modes)) {
+		struct drm_display_mode *preferred_mode = NULL;
+
+		list_for_each_entry(preferred_mode,
+				    &connector->probed_modes,
+				    head) {
+			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+				amdgpu_encoder->native_mode = *preferred_mode;
+
+			break;
+		}
+
+	}
+}
+
+/* Duplicate the encoder's native mode and relabel it as a @hdisplay x
+ * @vdisplay mode named @name, with the PREFERRED flag cleared.  Only the
+ * display size is changed; the native timings are kept so the sink scales.
+ * Returns the new mode or NULL if duplication failed.
+ */
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+			     char *name,
+			     int hdisplay, int vdisplay)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+	mode = drm_mode_duplicate(dev, native_mode);
+
+	if (mode == NULL)
+		return NULL;
+
+	mode->hdisplay = hdisplay;
+	mode->vdisplay = vdisplay;
+	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+	return mode;
+
+}
+
+/* Add a fixed list of common resolutions (scaled by the sink against its
+ * native mode) to the connector's probed-mode list.  Modes larger than the
+ * native mode, equal to it, or already present are skipped.
+ */
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+						 struct drm_connector *connector)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+				to_amdgpu_dm_connector(connector);
+	int i;
+	int n;
+	struct mode_size {
+		char name[DRM_DISPLAY_MODE_LEN];
+		int w;
+		int h;
+	} common_modes[] = {
+		{  "640x480",  640,  480},
+		{  "800x600",  800,  600},
+		{ "1024x768", 1024,  768},
+		{ "1280x720", 1280,  720},
+		{ "1280x800", 1280,  800},
+		{"1280x1024", 1280, 1024},
+		{ "1440x900", 1440,  900},
+		{"1680x1050", 1680, 1050},
+		{"1600x1200", 1600, 1200},
+		{"1920x1080", 1920, 1080},
+		{"1920x1200", 1920, 1200}
+	};
+
+	n = ARRAY_SIZE(common_modes);
+
+	for (i = 0; i < n; i++) {
+		struct drm_display_mode *curmode = NULL;
+		bool mode_existed = false;
+
+		/* Skip modes the native mode can't contain, and the native
+		 * size itself (it is already in the probed list)
+		 */
+		if (common_modes[i].w > native_mode->hdisplay ||
+		    common_modes[i].h > native_mode->vdisplay ||
+		   (common_modes[i].w == native_mode->hdisplay &&
+		    common_modes[i].h == native_mode->vdisplay))
+			continue;
+
+		list_for_each_entry(curmode, &connector->probed_modes, head) {
+			if (common_modes[i].w == curmode->hdisplay &&
+			    common_modes[i].h == curmode->vdisplay) {
+				mode_existed = true;
+				break;
+			}
+		}
+
+		if (mode_existed)
+			continue;
+
+		mode = amdgpu_dm_create_common_mode(encoder,
+				common_modes[i].name, common_modes[i].w,
+				common_modes[i].h);
+		/* drm_mode_duplicate can fail; don't hand a NULL mode to
+		 * drm_mode_probed_add (the original code did not check)
+		 */
+		if (!mode)
+			continue;
+
+		drm_mode_probed_add(connector, mode);
+		amdgpu_dm_connector->num_modes++;
+	}
+}
+
+/* Rebuild the probed-mode list from @edid (also refreshing the ELD and the
+ * cached native mode); with no EDID the connector reports zero modes.
+ */
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+					      struct edid *edid)
+{
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+			to_amdgpu_dm_connector(connector);
+
+	if (edid) {
+		/* empty probed_modes */
+		INIT_LIST_HEAD(&connector->probed_modes);
+		amdgpu_dm_connector->num_modes =
+				drm_add_edid_modes(connector, edid);
+
+		drm_edid_to_eld(connector, edid);
+
+		amdgpu_dm_get_native_mode(connector);
+	} else {
+		amdgpu_dm_connector->num_modes = 0;
+	}
+}
+
+/* Fill the connector's mode list from the cached EDID plus the synthetic
+ * common (scaled) modes; returns the total number of modes found.
+ */
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+	const struct drm_connector_helper_funcs *helper =
+			connector->helper_private;
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+			to_amdgpu_dm_connector(connector);
+	struct drm_encoder *encoder;
+	struct edid *edid = amdgpu_dm_connector->edid;
+
+	encoder = helper->best_encoder(connector);
+
+	amdgpu_dm_connector_ddc_get_modes(connector, edid);
+	amdgpu_dm_connector_add_common_modes(encoder, connector);
+	return amdgpu_dm_connector->num_modes;
+}
+
+/* Common initialization shared by real and MST connectors: bind the DC link,
+ * set capability flags, configure HPD polling by connector type, and attach
+ * the scaling/underscan properties.
+ */
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+				     struct amdgpu_dm_connector *aconnector,
+				     int connector_type,
+				     struct dc_link *link,
+				     int link_index)
+{
+	struct amdgpu_device *adev = dm->ddev->dev_private;
+
+	aconnector->connector_id = link_index;
+	aconnector->dc_link = link;
+	aconnector->base.interlace_allowed = false;
+	aconnector->base.doublescan_allowed = false;
+	aconnector->base.stereo_allowed = false;
+	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+	mutex_init(&aconnector->hpd_lock);
+
+	/* configure support HPD hot plug connector_>polled default value is 0
+	 * which means HPD hot plug not supported
+	 */
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	default:
+		break;
+	}
+
+	drm_object_attach_property(&aconnector->base.base,
+				dm->ddev->mode_config.scaling_mode_property,
+				DRM_MODE_SCALE_NONE);
+
+	drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.underscan_property,
+				UNDERSCAN_OFF);
+	drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.underscan_hborder_property,
+				0);
+	drm_object_attach_property(&aconnector->base.base,
+				adev->mode_info.underscan_vborder_property,
+				0);
+
+}
+
+/* i2c_algorithm.master_xfer hook: translate the Linux i2c_msg array into a
+ * DC i2c_command and submit it through the DDC pin.  Returns the number of
+ * messages transferred on success, -EIO on submission or allocation failure.
+ */
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+			      struct i2c_msg *msgs, int num)
+{
+	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+	struct ddc_service *ddc_service = i2c->ddc_service;
+	struct i2c_command cmd;
+	int i;
+	int result = -EIO;
+
+	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+	if (!cmd.payloads)
+		return result;
+
+	cmd.number_of_payloads = num;
+	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+	cmd.speed = 100;
+
+	/* Payloads alias the caller's buffers; no copy is made */
+	for (i = 0; i < num; i++) {
+		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+		cmd.payloads[i].address = msgs[i].addr;
+		cmd.payloads[i].length = msgs[i].len;
+		cmd.payloads[i].data = msgs[i].buf;
+	}
+
+	if (dal_i2caux_submit_i2c_command(
+			ddc_service->ctx->i2caux,
+			ddc_service->ddc_pin,
+			&cmd))
+		result = num;
+
+	kfree(cmd.payloads);
+	return result;
+}
+
+/* Advertise plain I2C plus emulated SMBus support for this adapter. */
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/* i2c algorithm vtable for the DC-backed DDC adapters */
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+	.master_xfer = amdgpu_dm_i2c_xfer,
+	.functionality = amdgpu_dm_i2c_func,
+};
+
+/* Allocate and populate (but do not register) an i2c adapter backed by the
+ * given DC DDC service.  Returns NULL on allocation failure.
+ * NOTE(review): the @res out-parameter is never written — confirm whether it
+ * can be dropped or is reserved for future error reporting.
+ */
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+	   int link_index,
+	   int *res)
+{
+	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+	struct amdgpu_i2c_adapter *i2c;
+
+	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+	if (!i2c)
+		return NULL;
+	i2c->base.owner = THIS_MODULE;
+	i2c->base.class = I2C_CLASS_DDC;
+	i2c->base.dev.parent = &adev->pdev->dev;
+	i2c->base.algo = &amdgpu_dm_i2c_algo;
+	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+	i2c_set_adapdata(&i2c->base, i2c);
+	i2c->ddc_service = ddc_service;
+
+	return i2c;
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ *
+ * Creates the i2c adapter, DRM connector and its helpers for @link_index,
+ * attaches the connector to @aencoder, and (for DP/eDP) sets up MST and the
+ * backlight device.  Returns 0 on success or a negative errno; on failure
+ * the i2c adapter is freed.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+				    struct amdgpu_dm_connector *aconnector,
+				    uint32_t link_index,
+				    struct amdgpu_encoder *aencoder)
+{
+	int res = 0;
+	int connector_type;
+	struct dc *dc = dm->dc;
+	struct dc_link *link = dc_get_link_at_index(dc, link_index);
+	struct amdgpu_i2c_adapter *i2c;
+
+	link->priv = aconnector;
+
+	DRM_DEBUG_DRIVER("%s()\n", __func__);
+
+	i2c = create_i2c(link->ddc, link->link_index, &res);
+	if (!i2c) {
+		DRM_ERROR("Failed to create i2c adapter data\n");
+		return -ENOMEM;
+	}
+
+	aconnector->i2c = i2c;
+	res = i2c_add_adapter(&i2c->base);
+
+	if (res) {
+		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+		goto out_free;
+	}
+
+	connector_type = to_drm_connector_type(link->connector_signal);
+
+	res = drm_connector_init(
+			dm->ddev,
+			&aconnector->base,
+			&amdgpu_dm_connector_funcs,
+			connector_type);
+
+	if (res) {
+		DRM_ERROR("connector_init failed\n");
+		aconnector->connector_id = -1;
+		goto out_free;
+	}
+
+	drm_connector_helper_add(
+			&aconnector->base,
+			&amdgpu_dm_connector_helper_funcs);
+
+	if (aconnector->base.funcs->reset)
+		aconnector->base.funcs->reset(&aconnector->base);
+
+	amdgpu_dm_connector_init_helper(
+		dm,
+		aconnector,
+		connector_type,
+		link,
+		link_index);
+
+	drm_mode_connector_attach_encoder(
+		&aconnector->base, &aencoder->base);
+
+	drm_connector_register(&aconnector->base);
+
+	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+		|| connector_type == DRM_MODE_CONNECTOR_eDP)
+		amdgpu_dm_initialize_dp_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+	/* NOTE: this currently will create backlight device even if a panel
+	 * is not connected to the eDP/LVDS connector.
+	 *
+	 * This is less than ideal but we don't have sink information at this
+	 * stage since detection happens after. We can't do detection earlier
+	 * since MST detection needs connectors to be created first.
+	 */
+	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+		/* Even if registration failed, we should continue with
+		 * DM initialization because not having a backlight control
+		 * is better than a black screen.
+		 */
+		amdgpu_dm_register_backlight_device(dm);
+
+		if (dm->backlight_dev)
+			dm->backlight_link = link;
+	}
+#endif
+
+out_free:
+	/* Success also falls through here; res == 0 skips the cleanup */
+	if (res) {
+		kfree(i2c);
+		aconnector->i2c = NULL;
+	}
+	return res;
+}
+
+/* Build a contiguous bitmask with one bit set per CRTC the encoder may
+ * drive.  Counts outside 1-5 (including 6 and the default case) collapse to
+ * the full six-CRTC mask 0x3f, exactly as the original switch did.
+ */
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+	int num_crtc = adev->mode_info.num_crtc;
+
+	if (num_crtc >= 1 && num_crtc <= 5)
+		return (1 << num_crtc) - 1;
+
+	return 0x3f;
+}
+
+/* Register one DRM encoder (TMDS type) able to drive any CRTC, attach the
+ * DM encoder helpers, and record @link_index as its id (-1 on failure).
+ * Returns the drm_encoder_init() result.
+ */
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+				  struct amdgpu_encoder *aencoder,
+				  uint32_t link_index)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	int res = drm_encoder_init(dev,
+				   &aencoder->base,
+				   &amdgpu_dm_encoder_funcs,
+				   DRM_MODE_ENCODER_TMDS,
+				   NULL);
+
+	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+	if (!res)
+		aencoder->encoder_id = link_index;
+	else
+		aencoder->encoder_id = -1;
+
+	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+	return res;
+}
+
+/* Enable or disable the vblank/pageflip interrupt machinery for one CRTC:
+ * on enable, turn vblank on before grabbing the pageflip IRQ; on disable,
+ * release the IRQ first, then turn vblank off (reverse order).
+ */
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+				 struct amdgpu_crtc *acrtc,
+				 bool enable)
+{
+	/*
+	 * this is not correct translation but will work as soon as VBLANK
+	 * constant is the same as PFLIP
+	 */
+	int irq_type =
+		amdgpu_crtc_idx_to_irq_type(
+			adev,
+			acrtc->crtc_id);
+
+	if (enable) {
+		drm_crtc_vblank_on(&acrtc->base);
+		amdgpu_irq_get(
+			adev,
+			&adev->pageflip_irq,
+			irq_type);
+	} else {
+
+		amdgpu_irq_put(
+			adev,
+			&adev->pageflip_irq,
+			irq_type);
+		drm_crtc_vblank_off(&acrtc->base);
+	}
+}
+
+/* Return true when the scaling/underscan configuration changed between two
+ * connector states in a way that requires reprogramming.
+ * NOTE(review): the border checks use && (both h AND v border non-zero) when
+ * underscan is toggled — confirm a single non-zero border should not also
+ * count as a difference.
+ */
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+			   const struct dm_connector_state *old_dm_state)
+{
+	if (dm_state->scaling != old_dm_state->scaling)
+		return true;
+	/* Underscan turned off: only matters if borders were in effect */
+	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+			return true;
+	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+			return true;
+	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+		return true;
+	return false;
+}
+
+/* Detach @stream from @acrtc during an update-mode commit: unregister it
+ * from freesync (if present) and mark the CRTC disabled.
+ */
+static void remove_stream(struct amdgpu_device *adev,
+			  struct amdgpu_crtc *acrtc,
+			  struct dc_stream_state *stream)
+{
+	/* this is the update mode case */
+	if (adev->dm.freesync_module)
+		mod_freesync_remove_stream(adev->dm.freesync_module, stream);
+
+	acrtc->otg_inst = -1;
+	acrtc->enabled = false;
+}
+
+/* Compute the DC cursor position for @plane on @crtc, clamping negative
+ * coordinates into hotspot offsets.  position->enable is false when the
+ * cursor has no CRTC or framebuffer.  Returns 0 on success or -EINVAL if
+ * the cursor exceeds the hardware maximum size.
+ */
+static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+			       struct dc_cursor_position *position)
+{
+	/* Original had a redundant self-assignment here:
+	 * "amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc)".
+	 */
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int x, y;
+	int xorigin = 0, yorigin = 0;
+
+	if (!crtc || !plane->state->fb) {
+		position->enable = false;
+		position->x = 0;
+		position->y = 0;
+		return 0;
+	}
+
+	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
+			  __func__,
+			  plane->state->crtc_w,
+			  plane->state->crtc_h);
+		return -EINVAL;
+	}
+
+	x = plane->state->crtc_x;
+	y = plane->state->crtc_y;
+	/* avivo cursor are offset into the total surface */
+	x += crtc->primary->state->src_x >> 16;
+	y += crtc->primary->state->src_y >> 16;
+	/* Negative coordinates become a hotspot shift with the cursor at 0 */
+	if (x < 0) {
+		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+		y = 0;
+	}
+	position->enable = true;
+	position->x = x;
+	position->y = y;
+	position->x_hotspot = xorigin;
+	position->y_hotspot = yorigin;
+
+	return 0;
+}
+
+/* Program the DC cursor for a plane commit: compute the position, and when
+ * the cursor is visible also push its size/address attributes to the stream.
+ * NOTE(review): amdgpu_crtc is derived from crtc before the NULL-safe
+ * position path, and crtc_id is printed below — confirm crtc cannot be NULL
+ * when both fb pointers are non-NULL.
+ */
+static void handle_cursor_update(struct drm_plane *plane,
+				 struct drm_plane_state *old_plane_state)
+{
+	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	uint64_t address = afb ? afb->address : 0;
+	struct dc_cursor_position position;
+	struct dc_cursor_attributes attributes;
+	int ret;
+
+	if (!plane->state->fb && !old_plane_state->fb)
+		return;
+
+	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
+			 __func__,
+			 amdgpu_crtc->crtc_id,
+			 plane->state->crtc_w,
+			 plane->state->crtc_h);
+
+	ret = get_cursor_position(plane, crtc, &position);
+	if (ret)
+		return;
+
+	if (!position.enable) {
+		/* turn off cursor */
+		if (crtc_state && crtc_state->stream)
+			dc_stream_set_cursor_position(crtc_state->stream,
+						      &position);
+		return;
+	}
+
+	amdgpu_crtc->cursor_width = plane->state->crtc_w;
+	amdgpu_crtc->cursor_height = plane->state->crtc_h;
+
+	attributes.address.high_part = upper_32_bits(address);
+	attributes.address.low_part  = lower_32_bits(address);
+	attributes.width             = plane->state->crtc_w;
+	attributes.height            = plane->state->crtc_h;
+	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+	attributes.rotation_angle    = 0;
+	attributes.attribute_flags.value = 0;
+
+	attributes.pitch = attributes.width;
+
+	if (crtc_state->stream) {
+		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+							 &attributes))
+			DRM_ERROR("DC failed to set cursor attributes\n");
+
+		if (!dc_stream_set_cursor_position(crtc_state->stream,
+						   &position))
+			DRM_ERROR("DC failed to set cursor position\n");
+	}
+}
+
+/* Hand the CRTC state's pending pageflip event over to the flip ISR:
+ * stash it on the amdgpu_crtc, mark the flip submitted, and clear it from
+ * the state so nobody else consumes it.  Caller must hold event_lock.
+ */
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+	assert_spin_locked(&acrtc->base.dev->event_lock);
+	WARN_ON(acrtc->event);
+
+	acrtc->event = acrtc->base.state->event;
+
+	/* Set the flip status */
+	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+	/* Mark this event as consumed */
+	acrtc->base.state->event = NULL;
+
+	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+						 acrtc->crtc_id);
+}
+
+/*
+ * Executes flip
+ *
+ * Waits on all BO's fences and for proper vblank count, then submits the
+ * new framebuffer address to DC under event_lock.  @target is the absolute
+ * vblank count to flip at; async flips skip the vblank wait via
+ * flip_immediate.
+ */
+static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      uint32_t target,
+			      struct dc_state *state)
+{
+	unsigned long flags;
+	uint32_t target_vblank;
+	int r, vpos, hpos;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+	struct dc_flip_addrs addr = { {0} };
+	/* TODO eliminate or rename surface_update */
+	struct dc_surface_update surface_updates[1] = { {0} };
+	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+
+
+	/* Prepare wait for target vblank early - before the fence-waits */
+	target_vblank = target - drm_crtc_vblank_count(crtc) +
+			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+	/* TODO This might fail and hence better not used, wait
+	 * explicitly on fences instead
+	 * and in general should be called for
+	 * blocking commit to as per framework helpers
+	 */
+	r = amdgpu_bo_reserve(abo, true);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve buffer before flip\n");
+		WARN_ON(1);
+	}
+
+	/* Wait for all fences on this FB */
+	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+								    MAX_SCHEDULE_TIMEOUT) < 0);
+
+	amdgpu_bo_unreserve(abo);
+
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip
+	 */
+	while ((acrtc->enabled &&
+		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
+					&vpos, &hpos, NULL, NULL,
+					&crtc->hwmode)
+		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+		(int)(target_vblank -
+		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+		usleep_range(1000, 1100);
+	}
+
+	/* Flip */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	/* update crtc fb */
+	crtc->primary->fb = fb;
+
+	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+	WARN_ON(!acrtc_state->stream);
+
+	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
+	addr.flip_immediate = async_flip;
+
+
+	if (acrtc->base.state->event)
+		prepare_flip_isr(acrtc);
+
+	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+	surface_updates->flip_addr = &addr;
+
+
+	dc_commit_updates_for_stream(adev->dm.dc,
+					     surface_updates,
+					     1,
+					     acrtc_state->stream,
+					     NULL,
+					     &surface_updates->surface,
+					     state);
+
+	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+			 __func__,
+			 addr.address.grph.addr.high_part,
+			 addr.address.grph.addr.low_part);
+
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+/*
+ * amdgpu_dm_commit_planes - program all plane updates that target one CRTC.
+ * @state: the atomic state being committed
+ * @dev: DRM device
+ * @dm: display manager instance
+ * @pcrtc: the CRTC whose planes are processed
+ * @wait_for_vblank: in/out; cleared when an async (immediate) flip was
+ *                   requested so the caller can skip the vblank wait
+ *
+ * Full (modeset-capable) commits gather the pre-constructed dc_plane_states
+ * and push them to the stream in a single dc_commit_planes_to_stream() call;
+ * pure page flips are forwarded to amdgpu_dm_do_flip() instead.  Cursor
+ * planes have their own dedicated update path.
+ */
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+				    struct drm_device *dev,
+				    struct amdgpu_display_manager *dm,
+				    struct drm_crtc *pcrtc,
+				    bool *wait_for_vblank)
+{
+	uint32_t i;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	struct dc_stream_state *dc_stream_attach;
+	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
+	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+	struct drm_crtc_state *new_pcrtc_state =
+			drm_atomic_get_new_crtc_state(state, pcrtc);
+	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+	int planes_count = 0;
+	unsigned long flags;
+
+	/* update planes when needed */
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+		struct drm_crtc *crtc = new_plane_state->crtc;
+		struct drm_crtc_state *new_crtc_state;
+		struct drm_framebuffer *fb = new_plane_state->fb;
+		bool pflip_needed;
+		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+		/* Cursor updates bypass the DC plane-commit machinery. */
+		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+			handle_cursor_update(plane, old_plane_state);
+			continue;
+		}
+
+		/* Only planes that land on this CRTC are of interest. */
+		if (!fb || !crtc || pcrtc != crtc)
+			continue;
+
+		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+		if (!new_crtc_state->active)
+			continue;
+
+		/* Commits that may not perform a modeset are treated as flips. */
+		pflip_needed = !state->allow_modeset;
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+			DRM_ERROR("%s: acrtc %d, already busy\n",
+				  __func__,
+				  acrtc_attach->crtc_id);
+			/* In commit tail framework this cannot happen */
+			WARN_ON(1);
+		}
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+		if (!pflip_needed) {
+			/* dc_state was built during atomic check; NULL here
+			 * means check and commit disagree. */
+			WARN_ON(!dm_new_plane_state->dc_state);
+
+			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+
+			dc_stream_attach = acrtc_state->stream;
+			planes_count++;
+
+		} else if (new_crtc_state->planes_changed) {
+			/* Assume even ONE crtc with immediate flip means
+			 * entire can't wait for VBLANK
+			 * TODO Check if it's correct
+			 */
+			*wait_for_vblank =
+					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
+				false : true;
+
+			/* TODO: Needs rework for multiplane flip */
+			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+				drm_crtc_vblank_get(crtc);
+
+			/* *wait_for_vblank (0 or 1) doubles as the offset to
+			 * the target vblank count. */
+			amdgpu_dm_do_flip(
+				crtc,
+				fb,
+				drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+				dm_state->context);
+		}
+
+	}
+
+	if (planes_count) {
+		unsigned long flags;
+
+		if (new_pcrtc_state->event) {
+
+			/* Hold a vblank reference so the flip-done event can
+			 * be delivered from the interrupt handler. */
+			drm_crtc_vblank_get(pcrtc);
+
+			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+			prepare_flip_isr(acrtc_attach);
+			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+		}
+
+		if (false == dc_commit_planes_to_stream(dm->dc,
+							plane_states_constructed,
+							planes_count,
+							dc_stream_attach,
+							dm_state->context))
+			dm_error("%s: Failed to attach plane!\n", __func__);
+	} else {
+		/*TODO BUG Here should go disable planes on CRTC. */
+	}
+}
+
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+				   struct drm_atomic_state *state,
+				   bool nonblock)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct amdgpu_device *adev = dev->dev_private;
+	int i;
+
+	/*
+	 * We evade vblanks and pflips on crtc that
+	 * should be changed. We do it here to flush & disable
+	 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
+	 * it will update crtc->dm_crtc_state->stream pointer which is used in
+	 * the ISRs.
+	 */
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+			manage_dm_interrupts(adev, acrtc, false);
+	}
+	/* Add check here for SoC's that support hardware cursor plane, to
+	 * unset legacy_cursor_update */
+
+	return drm_atomic_helper_commit(dev, state, nonblock);
+
+	/*TODO Handle EINTR, reenable IRQ*/
+}
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct dm_atomic_state *dm_state;
+	uint32_t i, j;
+	uint32_t new_crtcs_count = 0;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
+	struct dc_stream_state *new_stream = NULL;
+	unsigned long flags;
+	bool wait_for_vblank = true;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_con_state, *new_con_state;
+	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+	drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+	dm_state = to_dm_atomic_state(state);
+
+	/* update changed items */
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		DRM_DEBUG_DRIVER(
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
+			"connectors_changed:%d\n",
+			acrtc->crtc_id,
+			new_crtc_state->enable,
+			new_crtc_state->active,
+			new_crtc_state->planes_changed,
+			new_crtc_state->mode_changed,
+			new_crtc_state->active_changed,
+			new_crtc_state->connectors_changed);
+
+		/* handles headless hotplug case, updating new_state and
+		 * aconnector as needed
+		 */
+
+		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+			if (!dm_new_crtc_state->stream) {
+				/*
+				 * this could happen because of issues with
+				 * userspace notifications delivery.
+				 * In this case userspace tries to set mode on
+				 * display which is disconnect in fact.
+				 * dc_sink in NULL in this case on aconnector.
+				 * We expect reset mode will come soon.
+				 *
+				 * This can also happen when unplug is done
+				 * during resume sequence ended
+				 *
+				 * In this case, we want to pretend we still
+				 * have a sink to keep the pipe running so that
+				 * hw state is consistent with the sw state
+				 */
+				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+						__func__, acrtc->base.base.id);
+				continue;
+			}
+
+
+			if (dm_old_crtc_state->stream)
+				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+
+			/*
+			 * this loop saves set mode crtcs
+			 * we needed to enable vblanks once all
+			 * resources acquired in dc after dc_commit_streams
+			 */
+
+			/*TODO move all this into dm_crtc_state, get rid of
+			 * new_crtcs array and use old and new atomic states
+			 * instead
+			 */
+			new_crtcs[new_crtcs_count] = acrtc;
+			new_crtcs_count++;
+
+			new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+			acrtc->enabled = true;
+			acrtc->hw_mode = new_crtc_state->mode;
+			crtc->hwmode = new_crtc_state->mode;
+		} else if (modereset_required(new_crtc_state)) {
+			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+
+			/* i.e. reset mode */
+			if (dm_old_crtc_state->stream)
+				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+		}
+	} /* for_each_crtc_in_state() */
+
+	/*
+	 * Add streams after required streams from new and replaced streams
+	 * are removed from freesync module
+	 */
+	if (adev->dm.freesync_module) {
+		for (i = 0; i < new_crtcs_count; i++) {
+			struct amdgpu_dm_connector *aconnector = NULL;
+
+			new_crtc_state = drm_atomic_get_new_crtc_state(state,
+					&new_crtcs[i]->base);
+			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+			new_stream = dm_new_crtc_state->stream;
+			aconnector = amdgpu_dm_find_first_crtc_matching_connector(
+					state,
+					&new_crtcs[i]->base);
+			if (!aconnector) {
+				DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
+					 "skipping freesync init\n",
+					 new_crtcs[i]->crtc_id);
+				continue;
+			}
+
+			mod_freesync_add_stream(adev->dm.freesync_module,
+						new_stream, &aconnector->caps);
+		}
+	}
+
+	if (dm_state->context)
+		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+		if (dm_new_crtc_state->stream != NULL) {
+			const struct dc_stream_status *status =
+					dc_stream_get_status(dm_new_crtc_state->stream);
+
+			if (!status)
+				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+			else
+				acrtc->otg_inst = status->primary_otg_inst;
+		}
+	}
+
+	/* Handle scaling and underscan changes*/
+	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+		struct dc_stream_status *status = NULL;
+
+		if (acrtc)
+			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+		/* Skip any modesets/resets */
+		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+			continue;
+
+		/* Skip any thing not scale or underscan changes */
+		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+			continue;
+
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+
+		status = dc_stream_get_status(dm_new_crtc_state->stream);
+		WARN_ON(!status);
+		WARN_ON(!status->plane_count);
+
+		if (!dm_new_crtc_state->stream)
+			continue;
+
+		/*TODO How it works with MPO ?*/
+		if (!dc_commit_planes_to_stream(
+				dm->dc,
+				status->plane_states,
+				status->plane_count,
+				dm_new_crtc_state->stream,
+				dm_state->context))
+			dm_error("%s: Failed to update stream scaling!\n", __func__);
+	}
+
+	for (i = 0; i < new_crtcs_count; i++) {
+		/*
+		 * loop to enable interrupts on newly arrived crtc
+		 */
+		struct amdgpu_crtc *acrtc = new_crtcs[i];
+
+		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+		if (adev->dm.freesync_module)
+			mod_freesync_notify_mode_change(
+				adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+
+		manage_dm_interrupts(adev, acrtc, true);
+	}
+
+	/* update planes when needed per crtc*/
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+		if (dm_new_crtc_state->stream)
+			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+	}
+
+
+	/*
+	 * send vblank event on all events not handled in flip and
+	 * mark consumed event for drm_atomic_helper_commit_hw_done
+	 */
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+		if (new_crtc_state->event)
+			drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+		new_crtc_state->event = NULL;
+	}
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	/* Signal HW programming completion */
+	drm_atomic_helper_commit_hw_done(state);
+
+	if (wait_for_vblank)
+		drm_atomic_helper_wait_for_vblanks(dev, state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+	int ret = 0;
+	struct drm_device *ddev = connector->dev;
+	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+	struct drm_plane *plane = disconnected_acrtc->base.primary;
+	struct drm_connector_state *conn_state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *plane_state;
+
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+	/* Construct an atomic state to restore previous display setting */
+
+	/*
+	 * Attach connectors to drm_atomic_state
+	 */
+	conn_state = drm_atomic_get_connector_state(state, connector);
+
+	ret = PTR_ERR_OR_ZERO(conn_state);
+	if (ret)
+		goto err;
+
+	/* Attach crtc to drm_atomic_state*/
+	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+	ret = PTR_ERR_OR_ZERO(crtc_state);
+	if (ret)
+		goto err;
+
+	/* force a restore */
+	crtc_state->mode_changed = true;
+
+	/* Attach plane to drm_atomic_state */
+	plane_state = drm_atomic_get_plane_state(state, plane);
+
+	ret = PTR_ERR_OR_ZERO(plane_state);
+	if (ret)
+		goto err;
+
+
+	/* Call commit internally with the state we just constructed */
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return 0;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_put(state);
+
+	return ret;
+}
+
+/*
+ * This functions handle all cases when set mode does not come upon hotplug.
+ * This include when the same display is unplugged then plugged back into the
+ * same port and when we are running without usermode desktop manager supprot
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+				    struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct amdgpu_crtc *disconnected_acrtc;
+	struct dm_crtc_state *acrtc_state;
+
+	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+		return;
+
+	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+
+	if (!disconnected_acrtc || !acrtc_state->stream)
+		return;
+
+	/*
+	 * If the previous sink is not released and different from the current,
+	 * we deduce we are in a state where we can not rely on usermode call
+	 * to turn on the display, so we do it here
+	 */
+	if (acrtc_state->stream->sink != aconnector->dc_sink)
+		dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * do_aquire_global_lock - grab all modesetting locks to serialize against
+ * any blocking commits, and wait for completion of all outstanding
+ * non-blocking commits.
+ *
+ * Returns 0 on success or a negative errno (e.g. interrupted wait).
+ * NOTE(review): "aquire" in the name is a historical typo; kept because
+ * callers reference it.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+				 struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_commit *commit;
+	long ret;
+
+	/* Adding all modeset locks to acquire_ctx ensures that when the
+	 * framework releases it, the extra locks we take here are
+	 * released too.
+	 */
+	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		/* Peek the newest pending commit on this CRTC under its
+		 * commit_lock, taking a reference so it cannot vanish while
+		 * we wait on it. */
+		spin_lock(&crtc->commit_lock);
+		commit = list_first_entry_or_null(&crtc->commit_list,
+				struct drm_crtc_commit, commit_entry);
+		if (commit)
+			drm_crtc_commit_get(commit);
+		spin_unlock(&crtc->commit_lock);
+
+		if (!commit)
+			continue;
+
+		/* Make sure all pending HW programming completed and
+		 * page flips done
+		 */
+		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+		if (ret > 0)
+			ret = wait_for_completion_interruptible_timeout(
+					&commit->flip_done, 10*HZ);
+
+		if (ret == 0)
+			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+				  "timed out\n", crtc->base.id, crtc->name);
+
+		drm_crtc_commit_put(commit);
+	}
+
+	/* ret > 0 means the waits completed in time; report success. */
+	return ret < 0 ? ret : 0;
+}
+
+/*
+ * dm_update_crtcs_state - add/remove DC streams to mirror the DRM atomic
+ * state during atomic check.
+ * @dc: display core instance
+ * @state: DRM atomic state being checked
+ * @enable: false = remove streams for changed/disabled CRTCs,
+ *          true  = create and add streams for updated/enabled CRTCs
+ * @lock_and_validation_needed: set to true when a change requires the
+ *          global lock and full DC validation before commit
+ *
+ * Called twice from amdgpu_dm_atomic_check() (disable pass, then enable
+ * pass).  Returns 0 on success or a negative errno.
+ */
+static int dm_update_crtcs_state(struct dc *dc,
+				 struct drm_atomic_state *state,
+				 bool enable,
+				 bool *lock_and_validation_needed)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	int i;
+	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+	struct dc_stream_state *new_stream;
+	int ret = 0;
+
+	/*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
+	/* update changed items */
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct amdgpu_crtc *acrtc = NULL;
+		struct amdgpu_dm_connector *aconnector = NULL;
+		struct drm_connector_state *new_con_state = NULL;
+		struct dm_connector_state *dm_conn_state = NULL;
+
+		new_stream = NULL;
+
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		acrtc = to_amdgpu_crtc(crtc);
+
+		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+
+		/* TODO This hack should go away */
+		if (aconnector && enable) {
+			/* Make sure fake sink is created in plug-in scenario */
+			new_con_state = drm_atomic_get_connector_state(state,
+ 								    &aconnector->base);
+
+			if (IS_ERR(new_con_state)) {
+				ret = PTR_ERR_OR_ZERO(new_con_state);
+				break;
+			}
+
+			dm_conn_state = to_dm_connector_state(new_con_state);
+
+			new_stream = create_stream_for_sink(aconnector,
+							     &new_crtc_state->mode,
+							    dm_conn_state);
+
+			/*
+			 * We can have no stream on ACTION_SET if a display
+			 * was disconnected during S3; in this case it is not
+			 * an error, the OS will be updated after detection
+			 * and will do the right thing on the next atomic
+			 * commit.
+			 */
+
+			if (!new_stream) {
+				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+						__func__, acrtc->base.base.id);
+				break;
+			}
+		}
+
+		/* If the stream (including scaling) is unchanged, drop the
+		 * mode_changed flag so no full modeset is performed. */
+		if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+				dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+
+			new_crtc_state->mode_changed = false;
+
+			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+				         new_crtc_state->mode_changed);
+		}
+
+
+		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+			goto next_crtc;
+
+		DRM_DEBUG_DRIVER(
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
+			"connectors_changed:%d\n",
+			acrtc->crtc_id,
+			new_crtc_state->enable,
+			new_crtc_state->active,
+			new_crtc_state->planes_changed,
+			new_crtc_state->mode_changed,
+			new_crtc_state->active_changed,
+			new_crtc_state->connectors_changed);
+
+		/* Remove stream for any changed/disabled CRTC */
+		if (!enable) {
+
+			if (!dm_old_crtc_state->stream)
+				goto next_crtc;
+
+			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+					crtc->base.id);
+
+			/* i.e. reset mode */
+			if (dc_remove_stream_from_ctx(
+					dc,
+					dm_state->context,
+					dm_old_crtc_state->stream) != DC_OK) {
+				ret = -EINVAL;
+				goto fail;
+			}
+
+			dc_stream_release(dm_old_crtc_state->stream);
+			dm_new_crtc_state->stream = NULL;
+
+			*lock_and_validation_needed = true;
+
+		} else {/* Add stream for any updated/enabled CRTC */
+			/*
+			 * Quick fix to prevent NULL pointer on new_stream when
+			 * added MST connectors not found in existing crtc_state in the chained mode
+			 * TODO: need to dig out the root cause of that
+			 */
+			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+				goto next_crtc;
+
+			if (modereset_required(new_crtc_state))
+				goto next_crtc;
+
+			if (modeset_required(new_crtc_state, new_stream,
+					     dm_old_crtc_state->stream)) {
+
+				WARN_ON(dm_new_crtc_state->stream);
+
+				/* The crtc state takes its own reference. */
+				dm_new_crtc_state->stream = new_stream;
+				dc_stream_retain(new_stream);
+
+				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
+							crtc->base.id);
+
+				if (dc_add_stream_to_ctx(
+						dc,
+						dm_state->context,
+						dm_new_crtc_state->stream) != DC_OK) {
+					ret = -EINVAL;
+					goto fail;
+				}
+
+				*lock_and_validation_needed = true;
+			}
+		}
+
+next_crtc:
+		/* Release extra reference */
+		if (new_stream)
+			 dc_stream_release(new_stream);
+	}
+
+	return ret;
+
+fail:
+	if (new_stream)
+		dc_stream_release(new_stream);
+	return ret;
+}
+
+static int dm_update_planes_state(struct dc *dc,
+				  struct drm_atomic_state *state,
+				  bool enable,
+				  bool *lock_and_validation_needed)
+{
+	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+	int i ;
+	/* TODO return page_flip_needed() function */
+	bool pflip_needed  = !state->allow_modeset;
+	int ret = 0;
+
+	if (pflip_needed)
+		return ret;
+
+	/* Add new planes */
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+		new_plane_crtc = new_plane_state->crtc;
+		old_plane_crtc = old_plane_state->crtc;
+		dm_new_plane_state = to_dm_plane_state(new_plane_state);
+		dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+		/*TODO Implement atomic check for cursor plane */
+		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		/* Remove any changed/removed planes */
+		if (!enable) {
+
+			if (!old_plane_crtc)
+				continue;
+
+			old_crtc_state = drm_atomic_get_old_crtc_state(
+					state, old_plane_crtc);
+			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+			if (!dm_old_crtc_state->stream)
+				continue;
+
+			DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+					plane->base.id, old_plane_crtc->base.id);
+
+			if (!dc_remove_plane_from_context(
+					dc,
+					dm_old_crtc_state->stream,
+					dm_old_plane_state->dc_state,
+					dm_state->context)) {
+
+				ret = EINVAL;
+				return ret;
+			}
+
+
+			dc_plane_state_release(dm_old_plane_state->dc_state);
+			dm_new_plane_state->dc_state = NULL;
+
+			*lock_and_validation_needed = true;
+
+		} else { /* Add new planes */
+
+			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+				continue;
+
+			if (!new_plane_crtc)
+				continue;
+
+			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+			if (!dm_new_crtc_state->stream)
+				continue;
+
+
+			WARN_ON(dm_new_plane_state->dc_state);
+
+			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+
+			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+					plane->base.id, new_plane_crtc->base.id);
+
+			if (!dm_new_plane_state->dc_state) {
+				ret = -EINVAL;
+				return ret;
+			}
+
+			ret = fill_plane_attributes(
+				new_plane_crtc->dev->dev_private,
+				dm_new_plane_state->dc_state,
+				new_plane_state,
+				new_crtc_state,
+				false);
+			if (ret)
+				return ret;
+
+
+			if (!dc_add_plane_to_context(
+					dc,
+					dm_new_crtc_state->stream,
+					dm_new_plane_state->dc_state,
+					dm_state->context)) {
+
+				ret = -EINVAL;
+				return ret;
+			}
+
+			*lock_and_validation_needed = true;
+		}
+	}
+
+
+	return ret;
+}
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+				  struct drm_atomic_state *state)
+{
+	int i;
+	int ret;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct dc *dc = adev->dm.dc;
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+	struct drm_connector *connector;
+	struct drm_connector_state *old_con_state, *new_con_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+
+	/*
+	 * This bool will be set for true for any modeset/reset
+	 * or plane update which implies non fast surface update.
+	 */
+	bool lock_and_validation_needed = false;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		goto fail;
+
+	/*
+	 * legacy_cursor_update should be made false for SoC's having
+	 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
+	 * otherwise for software cursor plane,
+	 * we should not add it to list of affected planes.
+	 */
+	if (state->legacy_cursor_update) {
+		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+			if (new_crtc_state->color_mgmt_changed) {
+				ret = drm_atomic_add_affected_planes(state, crtc);
+				if (ret)
+					goto fail;
+			}
+		}
+	} else {
+		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+			if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+				continue;
+
+			if (!new_crtc_state->enable)
+				continue;
+
+			ret = drm_atomic_add_affected_connectors(state, crtc);
+			if (ret)
+				return ret;
+
+			ret = drm_atomic_add_affected_planes(state, crtc);
+			if (ret)
+				goto fail;
+		}
+	}
+
+	dm_state->context = dc_create_state();
+	ASSERT(dm_state->context);
+	dc_resource_state_copy_construct_current(dc, dm_state->context);
+
+	/* Remove exiting planes if they are modified */
+	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
+	if (ret) {
+		goto fail;
+	}
+
+	/* Disable all crtcs which require disable */
+	ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
+	if (ret) {
+		goto fail;
+	}
+
+	/* Enable all crtcs which require enable */
+	ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
+	if (ret) {
+		goto fail;
+	}
+
+	/* Add new/modified planes */
+	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
+	if (ret) {
+		goto fail;
+	}
+
+	/* Run this here since we want to validate the streams we created */
+	ret = drm_atomic_helper_check_planes(dev, state);
+	if (ret)
+		goto fail;
+
+	/* Check scaling and underscan changes*/
+	/*TODO Removed scaling changes validation due to inability to commit
+	 * new stream into context w\o causing full reset. Need to
+	 * decide how to handle.
+	 */
+	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+		/* Skip any modesets/resets */
+		if (!acrtc || drm_atomic_crtc_needs_modeset(
+				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+			continue;
+
+		/* Skip any thing not scale or underscan changes */
+		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+			continue;
+
+		lock_and_validation_needed = true;
+	}
+
+	/*
+	 * For full updates case when
+	 * removing/adding/updating  streams on once CRTC while flipping
+	 * on another CRTC,
+	 * acquiring global lock  will guarantee that any such full
+	 * update commit
+	 * will wait for completion of any outstanding flip using DRMs
+	 * synchronization events.
+	 */
+
+	if (lock_and_validation_needed) {
+
+		ret = do_aquire_global_lock(dev, state);
+		if (ret)
+			goto fail;
+
+		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+
+	/* Must be success */
+	WARN_ON(ret);
+	return ret;
+
+fail:
+	if (ret == -EDEADLK)
+		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+	else
+		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+
+	return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+					     struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+	uint8_t dpcd_data;
+	bool capable = false;
+
+	if (amdgpu_dm_connector->dc_link &&
+		dm_helpers_dp_read_dpcd(
+				NULL,
+				amdgpu_dm_connector->dc_link,
+				DP_DOWN_STREAM_PORT_COUNT,
+				&dpcd_data,
+				sizeof(dpcd_data))) {
+		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
+	}
+
+	return capable;
+}
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+					   struct edid *edid)
+{
+	int i;
+	uint64_t val_capable;
+	bool edid_check_required;
+	struct detailed_timing *timing;
+	struct detailed_non_pixel *data;
+	struct detailed_data_monitor_range *range;
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+			to_amdgpu_dm_connector(connector);
+
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	edid_check_required = false;
+	if (!amdgpu_dm_connector->dc_sink) {
+		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+		return;
+	}
+	if (!adev->dm.freesync_module)
+		return;
+	/*
+	 * if edid non zero restrict freesync only for dp and edp
+	 */
+	if (edid) {
+		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+			edid_check_required = is_dp_capable_without_timing_msa(
+						adev->dm.dc,
+						amdgpu_dm_connector);
+		}
+	}
+	val_capable = 0;
+	if (edid_check_required == true && (edid->version > 1 ||
+	   (edid->version == 1 && edid->revision > 1))) {
+		for (i = 0; i < 4; i++) {
+
+			timing	= &edid->detailed_timings[i];
+			data	= &timing->data.other_data;
+			range	= &data->data.range;
+			/*
+			 * Check if monitor has continuous frequency mode
+			 */
+			if (data->type != EDID_DETAIL_MONITOR_RANGE)
+				continue;
+			/*
+			 * Check for flag range limits only. If flag == 1 then
+			 * no additional timing information provided.
+			 * Default GTF, GTF Secondary curve and CVT are not
+			 * supported
+			 */
+			if (range->flags != 1)
+				continue;
+
+			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+			amdgpu_dm_connector->pixel_clock_mhz =
+				range->pixel_clock_mhz * 10;
+			break;
+		}
+
+		if (amdgpu_dm_connector->max_vfreq -
+				amdgpu_dm_connector->min_vfreq > 10) {
+			amdgpu_dm_connector->caps.supported = true;
+			amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
+					amdgpu_dm_connector->min_vfreq * 1000000;
+			amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
+					amdgpu_dm_connector->max_vfreq * 1000000;
+				val_capable = 1;
+		}
+	}
+
+	/*
+	 * TODO figure out how to notify user-mode or DRM of freesync caps
+	 * once we figure out how to deal with freesync in an upstreamable
+	 * fashion
+	 */
+
+}
+
+/* Counterpart of amdgpu_dm_add_sink_to_freesync_module; intentionally a
+ * no-op for now. */
+void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
+{
+	/*
+	 * TODO fill in once we figure out how to deal with freesync in
+	 * an upstreamable fashion
+	 */
+}

+ 259 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include "dc.h"
+
+/*
+ * This file contains the definition for amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display related functionality
+ * and this is the only component that calls DAL API.
+ * The API contained here intended for amdgpu driver use.
+ * The API that is called directly from KMS framework is located
+ * in amdgpu_dm_kms.h file
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+/*
+#include "include/amdgpu_dal_power_if.h"
+#include "amdgpu_dm_irq.h"
+*/
+
+#include "irq_types.h"
+#include "signal_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+/* Snapshot of framebuffer, cursor position and mode from a previous
+ * commit — presumably used to restore/compare state; confirm with users. */
+struct amdgpu_dm_prev_state {
+	struct drm_framebuffer *fb;
+	int32_t x;
+	int32_t y;
+	struct drm_display_mode mode;
+};
+
+/* Per-IRQ-source argument handed to the common interrupt handlers:
+ * the owning device plus the DAL irq source being serviced. */
+struct common_irq_params {
+	struct amdgpu_device *adev;
+	enum dc_irq_source irq_src;
+};
+
+/* Head of one low-context (deferred) handler list, bundled with the
+ * work item that runs those handlers outside hard-IRQ context. */
+struct irq_list_head {
+	struct list_head head;
+	/* In case this interrupt needs post-processing, 'work' will be queued*/
+	struct work_struct work;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+/* FBC compressor buffer: CPU mapping, backing BO and its GPU address.
+ * NOTE(review): tag is missing the 'p' in "compressor"; kept as-is since
+ * other files reference this name. */
+struct dm_comressor_info {
+	void *cpu_addr;
+	struct amdgpu_bo *bo_ptr;
+	uint64_t gpu_addr;
+};
+#endif
+
+
+/* Top-level Display Manager state for one amdgpu device: the DC instance,
+ * IRQ handler tables, timer workqueue, backlight and freesync hooks. */
+struct amdgpu_display_manager {
+	struct dal *dal;
+	struct dc *dc;
+	struct cgs_device *cgs_device;
+	/* lock to be used when DAL is called from SYNC IRQ context */
+	spinlock_t dal_lock;
+
+	struct amdgpu_device *adev;	/*AMD base driver*/
+	struct drm_device *ddev;	/*DRM base driver*/
+	u16 display_indexes_num;
+
+	struct amdgpu_dm_prev_state prev_state;
+
+	/*
+	 * 'irq_source_handler_table' holds a list of handlers
+	 * per (DAL) IRQ source.
+	 *
+	 * Each IRQ source may need to be handled at different contexts.
+	 * By 'context' we mean, for example:
+	 * - The ISR context, which is the direct interrupt handler.
+	 * - The 'deferred' context - this is the post-processing of the
+	 *	interrupt, but at a lower priority.
+	 *
+	 * Note that handlers are called in the same order as they were
+	 * registered (FIFO).
+	 */
+	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+	/* one entry per pageflip IRQ source */
+	struct common_irq_params
+	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+	/* one entry per vblank IRQ source */
+	struct common_irq_params
+	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+
+	/* this spin lock synchronizes access to 'irq_handler_list_table' */
+	spinlock_t irq_handler_list_table_lock;
+
+	/* Timer-related data. */
+	struct list_head timer_handler_list;
+	struct workqueue_struct *timer_workqueue;
+
+	/* Use dal_mutex for any activity which is NOT syncronized by
+	 * DRM mode setting locks.
+	 * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+	 * DRM mode setting locks being acquired. This is where dal_mutex
+	 * is acquired before calling into DAL. */
+	struct mutex dal_mutex;
+
+	struct backlight_device *backlight_dev;
+
+	const struct dc_link *backlight_link;
+
+	struct work_struct mst_hotplug_work;
+
+	struct mod_freesync *freesync_module;
+
+	/**
+	 * Caches device atomic state for suspend/resume
+	 */
+	struct drm_atomic_state *cached_state;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	struct dm_comressor_info compressor;
+#endif
+};
+
+/* DM-side connector state wrapping a drm_connector: DC sink/link handles,
+ * MST topology objects, i2c adapter and EDID-derived freesync limits. */
+struct amdgpu_dm_connector {
+
+	struct drm_connector base;
+	uint32_t connector_id;
+
+	/* we need to mind the EDID between detect
+	   and get modes due to analog/digital/tvencoder */
+	struct edid *edid;
+
+	/* shared with amdgpu */
+	struct amdgpu_hpd hpd;
+
+	/* number of modes generated from EDID at 'dc_sink' */
+	int num_modes;
+
+	/* The 'old' sink - before an HPD.
+	 * The 'current' sink is in dc_link->sink. */
+	struct dc_sink *dc_sink;
+	struct dc_link *dc_link;
+	struct dc_sink *dc_em_sink;
+
+	/* DM only */
+	struct drm_dp_mst_topology_mgr mst_mgr;
+	struct amdgpu_dm_dp_aux dm_dp_aux;
+	struct drm_dp_mst_port *port;
+	struct amdgpu_dm_connector *mst_port;
+	struct amdgpu_encoder *mst_encoder;
+
+	/* TODO see if we can merge with ddc_bus or make a dm_connector */
+	struct amdgpu_i2c_adapter *i2c;
+
+	/* Monitor range limits (parsed from the EDID range descriptor).
+	 * checkpatch fix: no space before ';'. */
+	int min_vfreq;
+	int max_vfreq;
+	int pixel_clock_mhz;
+
+	/* freesync caps */
+	struct mod_freesync_caps caps;
+
+	struct mutex hpd_lock;
+
+	bool fake_enable;
+};
+
+#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_plane_state;
+
+/* DRM plane state subclass carrying the matching DC plane state. */
+struct dm_plane_state {
+	struct drm_plane_state base;
+	struct dc_plane_state *dc_state;
+};
+
+/* DRM CRTC state subclass carrying the DC stream driving this CRTC. */
+struct dm_crtc_state {
+	struct drm_crtc_state base;
+	struct dc_stream_state *stream;
+};
+
+#define to_dm_crtc_state(x)    container_of(x, struct dm_crtc_state, base)
+
+/* DRM atomic state subclass carrying the global DC validation context. */
+struct dm_atomic_state {
+	struct drm_atomic_state base;
+
+	struct dc_state *context;
+};
+
+#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+					    struct drm_connector_state *state,
+					    struct drm_property *property,
+					    uint64_t val);
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+					    const struct drm_connector_state *state,
+					    struct drm_property *property,
+					    uint64_t *val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+				     struct amdgpu_dm_connector *aconnector,
+				     int connector_type,
+				     struct dc_link *link,
+				     int link_index);
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+				   struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+				    struct drm_connector *connector);
+
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+					   struct edid *edid);
+
+void
+amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
+
+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+
+#endif /* __AMDGPU_DM_H__ */

+ 498 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c

@@ -0,0 +1,498 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_edid.h>
+
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "dc.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+#include "dm_helpers.h"
+
+/* dm_helpers_parse_edid_caps
+ *
+ * Parse the raw EDID into DC's edid_caps: manufacturer/product identity,
+ * monitor name, HDMI detection, audio modes (SADs) and speaker allocation.
+ *
+ * @edid:	[in] pointer to edid
+ * @edid_caps:	[out] parsed capabilities
+ * @return
+ *	EDID_OK on success, EDID_BAD_INPUT / EDID_BAD_CHECKSUM otherwise
+ * */
+enum dc_edid_status dm_helpers_parse_edid_caps(
+		struct dc_context *ctx,
+		const struct dc_edid *edid,
+		struct dc_edid_caps *edid_caps)
+{
+	struct edid *edid_buf;
+	struct cea_sad *sads;
+	int sad_count = -1;
+	int sadb_count = -1;
+	int i = 0;
+	int j = 0;
+	uint8_t *sadb = NULL;
+
+	enum dc_edid_status result = EDID_OK;
+
+	/* Validate both inputs before touching 'edid'; the original code
+	 * dereferenced edid->raw_edid in the initializer before this check. */
+	if (!edid_caps || !edid)
+		return EDID_BAD_INPUT;
+
+	edid_buf = (struct edid *) edid->raw_edid;
+
+	/* A bad checksum is recorded, but parsing continues best-effort. */
+	if (!drm_edid_is_valid(edid_buf))
+		result = EDID_BAD_CHECKSUM;
+
+	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
+					((uint16_t) edid_buf->mfg_id[1])<<8;
+	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
+					((uint16_t) edid_buf->prod_code[1])<<8;
+	edid_caps->serial_number = edid_buf->serial;
+	edid_caps->manufacture_week = edid_buf->mfg_week;
+	edid_caps->manufacture_year = edid_buf->mfg_year;
+
+	/* One of the four detailed_timings stores the monitor name
+	 * (descriptor type 0xfc). It's stored in an array of length 13,
+	 * terminated early by '\n'. */
+	for (i = 0; i < 4; i++) {
+		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
+			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
+				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
+					break;
+
+				edid_caps->display_name[j] =
+					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
+				j++;
+			}
+		}
+	}
+
+	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
+			(struct edid *) edid->raw_edid);
+
+	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+	if (sad_count <= 0) {
+		DRM_INFO("SADs count is: %d, don't need to read it\n",
+				sad_count);
+		return result;
+	}
+
+	/* Clamp to the number of audio descriptors DC can hold. */
+	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
+	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
+		struct cea_sad *sad = &sads[i];
+
+		edid_caps->audio_modes[i].format_code = sad->format;
+		edid_caps->audio_modes[i].channel_count = sad->channels;
+		edid_caps->audio_modes[i].sample_rate = sad->freq;
+		edid_caps->audio_modes[i].sample_size = sad->byte2;
+	}
+
+	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
+
+	if (sadb_count < 0) {
+		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
+		sadb_count = 0;
+	}
+
+	if (sadb_count)
+		edid_caps->speaker_flags = sadb[0];
+	else
+		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+	kfree(sads);
+	kfree(sadb);
+
+	return result;
+}
+
+/* Copy DRM's MST payload table (held under payload_lock) into DC's
+ * dp_mst_stream_allocation_table for the given connector's branch. */
+static void get_payload_table(
+		struct amdgpu_dm_connector *aconnector,
+		struct dp_mst_stream_allocation_table *proposed_table)
+{
+	int i;
+	struct drm_dp_mst_topology_mgr *mst_mgr =
+			&aconnector->mst_port->mst_mgr;
+
+	mutex_lock(&mst_mgr->payload_lock);
+
+	proposed_table->stream_count = 0;
+
+	/* number of active streams */
+	for (i = 0; i < mst_mgr->max_payloads; i++) {
+		if (mst_mgr->payloads[i].num_slots == 0)
+			break; /* end of vcp_id table */
+
+		/* Pending local deletes should have been flushed already. */
+		ASSERT(mst_mgr->payloads[i].payload_state !=
+				DP_PAYLOAD_DELETE_LOCAL);
+
+		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
+			mst_mgr->payloads[i].payload_state ==
+					DP_PAYLOAD_REMOTE) {
+
+			struct dp_mst_stream_allocation *sa =
+					&proposed_table->stream_allocations[
+						proposed_table->stream_count];
+
+			sa->slot_count = mst_mgr->payloads[i].num_slots;
+			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
+			proposed_table->stream_count++;
+		}
+	}
+
+	mutex_unlock(&mst_mgr->payload_lock);
+}
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ *
+ * On enable: derive PBN from pixel clock and color depth, allocate a VCPI
+ * and send payload part 1. On disable: reset the VCPI slots. Either way
+ * the resulting DRM payload table is copied into @proposed_table for DC.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+		struct dc_context *ctx,
+		const struct dc_stream_state *stream,
+		struct dp_mst_stream_allocation_table *proposed_table,
+		bool enable)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	struct drm_dp_mst_port *mst_port;
+	int slots = 0;
+	bool ret;
+	int clock;
+	int bpp = 0;
+	int pbn = 0;
+
+	aconnector = stream->sink->priv;
+
+	if (!aconnector || !aconnector->mst_port)
+		return false;
+
+	mst_mgr = &aconnector->mst_port->mst_mgr;
+
+	if (!mst_mgr->mst_state)
+		return false;
+
+	mst_port = aconnector->port;
+
+	if (enable) {
+		clock = stream->timing.pix_clk_khz;
+
+		/* bits per color component for the stream's depth */
+		switch (stream->timing.display_color_depth) {
+
+		case COLOR_DEPTH_666:
+			bpp = 6;
+			break;
+		case COLOR_DEPTH_888:
+			bpp = 8;
+			break;
+		case COLOR_DEPTH_101010:
+			bpp = 10;
+			break;
+		case COLOR_DEPTH_121212:
+			bpp = 12;
+			break;
+		case COLOR_DEPTH_141414:
+			bpp = 14;
+			break;
+		case COLOR_DEPTH_161616:
+			bpp = 16;
+			break;
+		default:
+			ASSERT(bpp != 0);
+			break;
+		}
+
+		/* three components per pixel */
+		bpp = bpp * 3;
+
+		/* TODO need to know link rate */
+
+		pbn = drm_dp_calc_pbn_mode(clock, bpp);
+
+		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
+		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+
+		if (!ret)
+			return false;
+
+	} else {
+		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
+	}
+
+	ret = drm_dp_update_payload_part1(mst_mgr);
+
+	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
+	 * AUX message. The sequence is slot 1-63 allocated sequence for each
+	 * stream. AMD ASIC stream slot allocation should follow the same
+	 * sequence. copy DRM MST allocation to dc */
+
+	get_payload_table(aconnector, proposed_table);
+
+	/* part1 returns 0 on success; the table is copied first regardless
+	 * so DC stays in sync with DRM's view. */
+	if (ret)
+		return false;
+
+	return true;
+}
+
+/*
+ * Polls for ACT (allocation change trigger) handled and sends
+ * ALLOCATE_PAYLOAD message. Returns true when the branch device has
+ * acknowledged the allocation change.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+		struct dc_context *ctx,
+		const struct dc_stream_state *stream)
+{
+	struct amdgpu_dm_connector *aconnector = stream->sink->priv;
+	struct drm_dp_mst_topology_mgr *mgr;
+
+	if (!aconnector || !aconnector->mst_port)
+		return false;
+
+	mgr = &aconnector->mst_port->mst_mgr;
+	if (!mgr->mst_state)
+		return false;
+
+	/* Zero from the DRM helper means ACT was handled in time. */
+	return drm_dp_check_act_status(mgr) == 0;
+}
+
+/*
+ * Sends payload part 2 to the branch device; on disable, also releases
+ * the stream's VCPI. Returns false if the topology is down or the
+ * payload update fails.
+ */
+bool dm_helpers_dp_mst_send_payload_allocation(
+		struct dc_context *ctx,
+		const struct dc_stream_state *stream,
+		bool enable)
+{
+	struct amdgpu_dm_connector *aconnector = stream->sink->priv;
+	struct drm_dp_mst_topology_mgr *mgr;
+
+	if (!aconnector || !aconnector->mst_port)
+		return false;
+
+	mgr = &aconnector->mst_port->mst_mgr;
+	if (!mgr->mst_state)
+		return false;
+
+	if (drm_dp_update_payload_part2(mgr))
+		return false;
+
+	if (!enable)
+		drm_dp_mst_deallocate_vcpi(mgr, aconnector->port);
+
+	return true;
+}
+
+/* Connection-event log hook; the Linux DM accepts all entries (no-op). */
+bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
+{
+	return true;
+}
+
+/* DTN log hooks — stubbed out in the Linux DM. */
+void dm_dtn_log_begin(struct dc_context *ctx)
+{}
+
+void dm_dtn_log_append_v(struct dc_context *ctx,
+		const char *pMsg, ...)
+{}
+
+void dm_dtn_log_end(struct dc_context *ctx)
+{}
+
+/*
+ * Start the DP MST topology manager for @link's connector. At boot the
+ * actual start is deferred (hotplug processing will do it later) and we
+ * just report success.
+ */
+bool dm_helpers_dp_mst_start_top_mgr(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		bool boot)
+{
+	struct amdgpu_dm_connector *aconnector = link->priv;
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	if (boot) {
+		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
+				aconnector, aconnector->base.base.id);
+		return true;
+	}
+
+	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+			aconnector, aconnector->base.base.id);
+
+	/* drm helper returns 0 on success */
+	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+}
+
+/* Stop the DP MST topology manager for @link's connector, if running. */
+void dm_helpers_dp_mst_stop_top_mgr(
+		struct dc_context *ctx,
+		const struct dc_link *link)
+{
+	struct amdgpu_dm_connector *aconnector = link->priv;
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return;
+	}
+
+	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+			aconnector, aconnector->base.base.id);
+
+	if (aconnector->mst_mgr.mst_state == true)
+		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+}
+
+/* Read @size bytes from DPCD @address over the connector's AUX channel.
+ * Returns true when at least one byte was transferred. */
+bool dm_helpers_dp_read_dpcd(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size)
+{
+	struct amdgpu_dm_connector *aconnector = link->priv;
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
+			data, size) > 0;
+}
+
+/* Write @size bytes to DPCD @address over the connector's AUX channel.
+ * Returns true when at least one byte was transferred. */
+bool dm_helpers_dp_write_dpcd(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		uint32_t address,
+		const uint8_t *data,
+		uint32_t size)
+{
+	struct amdgpu_dm_connector *aconnector = link->priv;
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	/* cast drops const: drm_dp_dpcd_write takes a non-const buffer */
+	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+			address, (uint8_t *)data, size) > 0;
+}
+
+/* Translate a DC i2c_command into i2c_msg's and submit them on the
+ * connector's i2c bus. Returns true only if every message transferred. */
+bool dm_helpers_submit_i2c(
+		struct dc_context *ctx,
+		const struct dc_link *link,
+		struct i2c_command *cmd)
+{
+	struct amdgpu_dm_connector *aconnector = link->priv;
+	struct i2c_msg *msgs;
+	int i = 0;
+	int num = cmd->number_of_payloads;
+	bool result;
+
+	if (!aconnector) {
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
+	}
+
+	/* kcalloc checks num * size for overflow, unlike the original
+	 * open-coded kzalloc(num * sizeof(...)). */
+	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
+
+	if (!msgs)
+		return false;
+
+	for (i = 0; i < num; i++) {
+		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
+		msgs[i].addr = cmd->payloads[i].address;
+		msgs[i].len = cmd->payloads[i].length;
+		msgs[i].buf = cmd->payloads[i].data;
+	}
+
+	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
+
+	kfree(msgs);
+
+	return result;
+}
+
+/* Read the sink's EDID over AUX (DP) or DDC and parse it into @sink,
+ * retrying on checksum failure since some dongles corrupt the first read. */
+enum dc_edid_status dm_helpers_read_local_edid(
+		struct dc_context *ctx,
+		struct dc_link *link,
+		struct dc_sink *sink)
+{
+	struct amdgpu_dm_connector *aconnector = link->priv;
+	struct i2c_adapter *ddc;
+	int retry = 3;
+	enum dc_edid_status edid_status;
+	struct edid *edid;
+
+	if (link->aux_mode)
+		ddc = &aconnector->dm_dp_aux.aux.ddc;
+	else
+		ddc = &aconnector->i2c->base;
+
+	/* some dongles read edid incorrectly the first time,
+	 * do check sum and retry to make sure read correct edid.
+	 */
+	do {
+
+		edid = drm_get_edid(&aconnector->base, ddc);
+
+		if (!edid)
+			return EDID_NO_RESPONSE;
+
+		/* base block plus any extension blocks */
+		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
+		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
+
+		/* We don't need the original edid anymore */
+		kfree(edid);
+
+		edid_status = dm_helpers_parse_edid_caps(
+						ctx,
+						&sink->dc_edid,
+						&sink->edid_caps);
+
+	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+
+	if (edid_status != EDID_OK)
+		DRM_ERROR("EDID err: %d, on connector: %s",
+				edid_status,
+				aconnector->base.name);
+
+	return edid_status;
+}

+ 755 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c

@@ -0,0 +1,755 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+/* Fields shared by IRQ and timer handler descriptors: list linkage,
+ * the callback and its opaque argument. */
+struct handler_common_data {
+	struct list_head list;
+	interrupt_handler handler;
+	void *handler_arg;
+
+	/* DM which this handler belongs to */
+	struct amdgpu_display_manager *dm;
+};
+
+/* One registered handler for a single DAL interrupt source. */
+struct amdgpu_dm_irq_handler_data {
+	struct handler_common_data hcd;
+	/* DAL irq source which registered for this interrupt. */
+	enum dc_irq_source irq_source;
+};
+
+/* A timer callback queued on the DM timer workqueue. */
+struct amdgpu_dm_timer_handler_data {
+	struct handler_common_data hcd;
+	struct delayed_work d_work;
+};
+
+#define DM_IRQ_TABLE_LOCK(adev, flags) \
+	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
+
+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
+	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
+
+/******************************************************************************
+ * Private functions.
+ *****************************************************************************/
+
+/* Populate the fields shared by every DM handler descriptor. */
+static void init_handler_common_data(struct handler_common_data *hcd,
+				     void (*ih)(void *),
+				     void *args,
+				     struct amdgpu_display_manager *dm)
+{
+	hcd->dm = dm;
+	hcd->handler = ih;
+	hcd->handler_arg = args;
+}
+
+/**
+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ *
+ * @work: work struct
+ *
+ * Runs every handler registered for this source at
+ * INTERRUPT_LOW_IRQ_CONTEXT (most commonly HPD handling).
+ */
+static void dm_irq_work_func(struct work_struct *work)
+{
+	struct list_head *entry;
+	struct irq_list_head *irq_list_head =
+		container_of(work, struct irq_list_head, work);
+	struct list_head *handler_list = &irq_list_head->head;
+	struct amdgpu_dm_irq_handler_data *handler_data;
+
+	list_for_each(entry, handler_list) {
+		handler_data =
+			list_entry(
+				entry,
+				struct amdgpu_dm_irq_handler_data,
+				hcd.list);
+
+		/* Dropped the duplicated "schedule_work" debug print: this
+		 * function is the worker itself, not the scheduling site. */
+		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+				handler_data->irq_source);
+
+		handler_data->hcd.handler(handler_data->hcd.handler_arg);
+	}
+}
+
+/**
+ * Remove a handler and return a pointer to hander list from which the
+ * handler was removed. Returns NULL if @ih was not found in the context
+ * given by @int_params (not necessarily an error — the caller probes
+ * each context in turn).
+ */
+static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+					    void *ih,
+					    const struct dc_interrupt_params *int_params)
+{
+	struct list_head *hnd_list;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
+	unsigned long irq_table_flags;
+	bool handler_removed = false;
+	enum dc_irq_source irq_source;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	irq_source = int_params->irq_source;
+
+	/* pick the per-source list for the requested context */
+	switch (int_params->int_context) {
+	case INTERRUPT_HIGH_IRQ_CONTEXT:
+		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+		break;
+	case INTERRUPT_LOW_IRQ_CONTEXT:
+	default:
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		break;
+	}
+
+	list_for_each_safe(entry, tmp, hnd_list) {
+
+		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+				hcd.list);
+
+		/* 'ih' is the opaque handle returned at registration time,
+		 * i.e. the handler_data pointer itself. */
+		if (ih == handler) {
+			/* Found our handler. Remove it from the list. */
+			list_del(&handler->hcd.list);
+			handler_removed = true;
+			break;
+		}
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	if (handler_removed == false) {
+		/* Not necessarily an error - caller may not
+		 * know the context. */
+		return NULL;
+	}
+
+	/* freed outside the spinlock */
+	kfree(handler);
+
+	DRM_DEBUG_KMS(
+	"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
+		ih, int_params->irq_source, int_params->int_context);
+
+	return hnd_list;
+}
+
+/* If 'handler_in == NULL' then remove ALL handlers. */
+static void remove_timer_handler(struct amdgpu_device *adev,
+				 struct amdgpu_dm_timer_handler_data *handler_in)
+{
+	struct amdgpu_dm_timer_handler_data *handler_temp;
+	struct list_head *handler_list;
+	struct list_head *entry, *tmp;
+	unsigned long irq_table_flags;
+	bool handler_removed = false;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	handler_list = &adev->dm.timer_handler_list;
+
+	list_for_each_safe(entry, tmp, handler_list) {
+		/* Note that list_for_each_safe() guarantees that
+		 * handler_temp is NOT null. */
+		handler_temp = list_entry(entry,
+				struct amdgpu_dm_timer_handler_data, hcd.list);
+
+		if (handler_in == NULL || handler_in == handler_temp) {
+			list_del(&handler_temp->hcd.list);
+			/* NOTE(review): the table lock is dropped here so
+			 * cancel_delayed_work_sync() can sleep; 'tmp' could
+			 * in principle go stale while unlocked — confirm all
+			 * callers serialize against concurrent removal. */
+			DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+			DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
+					handler_temp);
+
+			if (handler_in == NULL) {
+				/* Since it is still in the queue, it must
+				 * be cancelled. */
+				cancel_delayed_work_sync(&handler_temp->d_work);
+			}
+
+			kfree(handler_temp);
+			handler_removed = true;
+
+			DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+		}
+
+		/* Remove ALL handlers. */
+		if (handler_in == NULL)
+			continue;
+
+		/* Remove a SPECIFIC handler.
+		 * Found our handler - we can stop here. */
+		if (handler_in == handler_temp)
+			break;
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	if (handler_in != NULL && handler_removed == false)
+		DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
+				handler_in);
+}
+
+/* Sanity-check the parameters of an interrupt registration request:
+ * non-NULL inputs, a known context and a valid DAL irq source. */
+static bool
+validate_irq_registration_params(struct dc_interrupt_params *int_params,
+				 void (*ih)(void *))
+{
+	if (!int_params || !ih) {
+		DRM_ERROR("DM_IRQ: invalid input!\n");
+		return false;
+	}
+
+	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+				int_params->int_context);
+		return false;
+	}
+
+	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+				int_params->irq_source);
+		return false;
+	}
+
+	return true;
+}
+
+/* Sanity-check the parameters of an interrupt unregistration request. */
+static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
+					       irq_handler_idx handler_idx)
+{
+	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
+		DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+		return false;
+	}
+
+	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+		return false;
+	}
+
+	return true;
+}
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+/*
+ * Allocate and register a handler for @int_params->irq_source in the
+ * requested context. Returns an opaque handle (needed later to
+ * unregister), or DAL_INVALID_IRQ_HANDLER_IDX on failure.
+ */
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+				       struct dc_interrupt_params *int_params,
+				       void (*ih)(void *),
+				       void *handler_args)
+{
+	struct list_head *hnd_list;
+	struct amdgpu_dm_irq_handler_data *handler_data;
+	unsigned long irq_table_flags;
+	enum dc_irq_source irq_source;
+
+	if (!validate_irq_registration_params(int_params, ih))
+		return DAL_INVALID_IRQ_HANDLER_IDX;
+
+	/* kzalloc zero-initializes; the original's follow-up memset was
+	 * redundant and has been dropped. */
+	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+	if (!handler_data) {
+		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+		return DAL_INVALID_IRQ_HANDLER_IDX;
+	}
+
+	init_handler_common_data(&handler_data->hcd, ih, handler_args,
+			&adev->dm);
+
+	irq_source = int_params->irq_source;
+
+	handler_data->irq_source = irq_source;
+
+	/* Lock the list, add the handler. */
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	switch (int_params->int_context) {
+	case INTERRUPT_HIGH_IRQ_CONTEXT:
+		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+		break;
+	case INTERRUPT_LOW_IRQ_CONTEXT:
+	default:
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		break;
+	}
+
+	list_add_tail(&handler_data->hcd.list, hnd_list);
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	/* This pointer will be stored by code which requested interrupt
+	 * registration.
+	 * The same pointer will be needed in order to unregister the
+	 * interrupt. */
+
+	DRM_DEBUG_KMS(
+		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
+		handler_data,
+		irq_source,
+		int_params->int_context);
+
+	return handler_data;
+}
+
+/*
+ * Unregister the handler identified by the opaque handle @ih for
+ * @irq_source. The caller does not tell us which context the handler
+ * was registered in, so every context is probed in turn.
+ */
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+					enum dc_irq_source irq_source,
+					void *ih)
+{
+	struct list_head *handler_list = NULL;
+	struct dc_interrupt_params int_params;
+	int ctx;
+
+	if (!validate_irq_unregistration_params(irq_source, ih))
+		return;
+
+	memset(&int_params, 0, sizeof(int_params));
+	int_params.irq_source = irq_source;
+
+	for (ctx = 0; ctx < INTERRUPT_CONTEXT_NUMBER; ctx++) {
+		int_params.int_context = ctx;
+		handler_list = remove_irq_handler(adev, ih, &int_params);
+		if (handler_list)
+			break;
+	}
+
+	if (!handler_list) {
+		/* If we got here, it means we searched all irq contexts
+		 * for this irq source, but the handler was not found. */
+		DRM_ERROR(
+		"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
+			ih, irq_source);
+	}
+}
+
+/*
+ * Initialize the per-source handler lists, the IRQ table lock and the
+ * DM timer workqueue. Returns 0 on success, -ENOMEM if the workqueue
+ * cannot be created (kernel errno convention instead of a bare -1).
+ */
+int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+{
+	int src;
+	struct irq_list_head *lh;
+
+	DRM_DEBUG_KMS("DM_IRQ\n");
+
+	spin_lock_init(&adev->dm.irq_handler_list_table_lock);
+
+	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+		/* low context handler list init */
+		lh = &adev->dm.irq_handler_list_low_tab[src];
+		INIT_LIST_HEAD(&lh->head);
+		INIT_WORK(&lh->work, dm_irq_work_func);
+
+		/* high context handler init */
+		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+	}
+
+	INIT_LIST_HEAD(&adev->dm.timer_handler_list);
+
+	/* allocate and initialize the workqueue for DM timer */
+	adev->dm.timer_workqueue = create_singlethread_workqueue(
+			"dm_timer_queue");
+	if (adev->dm.timer_workqueue == NULL) {
+		DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* DM IRQ and timer resource release */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+{
+	int src;
+	struct irq_list_head *lh;
+	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+
+	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+
+		/* The handler was removed from the table,
+		 * it means it is safe to flush all the 'work'
+		 * (because no code can schedule a new one). */
+		lh = &adev->dm.irq_handler_list_low_tab[src];
+		flush_work(&lh->work);
+	}
+
+	/* Cancel ALL timers and release handlers (if any). */
+	remove_timer_handler(adev, NULL);
+	/* Release the queue itself. */
+	destroy_workqueue(adev->dm.timer_workqueue);
+}
+
+/* amdgpu_dm_irq_suspend() - disable HPD/HPDRX interrupts for suspend.
+ *
+ * Walks the HPD1..HPD6RX source range; FLIP/VBLANK are handled separately
+ * by manage_dm_interrupts when CRTCs are disabled.  Always returns 0. */
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h;
+	struct list_head *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: suspend\n");
+
+	/**
+	 * Disable HW interrupt  for HPD and HPDRX only since FLIP and VBLANK
+	 * will be disabled from manage_dm_interrupts on disable CRTC.
+	 */
+	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		/* Only touch HW state for sources somebody registered for. */
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, false);
+
+		/* The table lock must be dropped before flush_work(): the
+		 * work item itself takes this lock and would deadlock. */
+		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	return 0;
+}
+
+/* amdgpu_dm_irq_resume_early() - re-enable HPD-RX (short pulse) interrupts
+ * on early resume.  Always returns 0. */
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+{
+	unsigned long flags;
+	int src;
+
+	DM_IRQ_TABLE_LOCK(adev, flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+
+	/* re-enable short pulse interrupts HW interrupt */
+	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+		struct list_head *low =
+			&adev->dm.irq_handler_list_low_tab[src].head;
+		struct list_head *high =
+			&adev->dm.irq_handler_list_high_tab[src];
+
+		/* Skip sources nobody registered a handler for. */
+		if (list_empty(low) && list_empty(high))
+			continue;
+
+		dc_interrupt_set(adev->dm.dc, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, flags);
+
+	return 0;
+}
+
+/* amdgpu_dm_irq_resume_late() - re-enable HPD interrupts on late resume.
+ * Always returns 0. */
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h, *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+	/*
+	 * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
+	 * will be enabled from manage_dm_interrupts on enable CRTC.
+	 */
+	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		/* Only touch HW state for sources somebody registered for. */
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	return 0;
+}
+
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ *
+ * Looks up the low-context handler list under the table lock; the actual
+ * schedule_work() happens after the lock is dropped.
+ */
+static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
+					enum dc_irq_source irq_source)
+{
+	unsigned long irq_table_flags;
+	struct work_struct *work = NULL;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	/* Only schedule if at least one low-context handler is registered. */
+	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	if (work) {
+		/* schedule_work() returns false if the work was already
+		 * pending; logged here for visibility. */
+		if (!schedule_work(work))
+			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+						irq_source);
+	}
+
+}
+
+/** amdgpu_dm_irq_immediate_work
+ *  Invoke the high-context handlers registered for @irq_source directly,
+ *  without deferring to a work queue.
+ */
+static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
+					 enum dc_irq_source irq_source)
+{
+	struct amdgpu_dm_irq_handler_data *hnd;
+	unsigned long flags;
+
+	DM_IRQ_TABLE_LOCK(adev, flags);
+
+	list_for_each_entry(hnd,
+			    &adev->dm.irq_handler_list_high_tab[irq_source],
+			    hcd.list) {
+		/* Call the subcomponent which registered for immediate
+		 * interrupt notification. */
+		hnd->hcd.handler(hnd->hcd.handler_arg);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, flags);
+}
+
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq.  Always returns 0.
+ */
+static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
+				 struct amdgpu_irq_src *source,
+				 struct amdgpu_iv_entry *entry)
+{
+
+	/* Translate the IV ring entry to a DAL irq source. */
+	enum dc_irq_source src =
+		dc_interrupt_to_irq_source(
+			adev->dm.dc,
+			entry->src_id,
+			entry->src_data[0]);
+
+	/* Acknowledge in HW before dispatching to handlers. */
+	dc_interrupt_ack(adev->dm.dc, src);
+
+	/* Call high irq work immediately */
+	amdgpu_dm_irq_immediate_work(adev, src);
+	/*Schedule low_irq work */
+	amdgpu_dm_irq_schedule_work(adev, src);
+
+	return 0;
+}
+
+/* Translate an amdgpu HPD pin id to the corresponding DAL irq source,
+ * or DC_IRQ_SOURCE_INVALID for an unknown pin. */
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+{
+	if (type == AMDGPU_HPD_1)
+		return DC_IRQ_SOURCE_HPD1;
+	if (type == AMDGPU_HPD_2)
+		return DC_IRQ_SOURCE_HPD2;
+	if (type == AMDGPU_HPD_3)
+		return DC_IRQ_SOURCE_HPD3;
+	if (type == AMDGPU_HPD_4)
+		return DC_IRQ_SOURCE_HPD4;
+	if (type == AMDGPU_HPD_5)
+		return DC_IRQ_SOURCE_HPD5;
+	if (type == AMDGPU_HPD_6)
+		return DC_IRQ_SOURCE_HPD6;
+
+	return DC_IRQ_SOURCE_INVALID;
+}
+
+/* .set hook for HPD irq sources: mirror the requested state into HW. */
+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *source,
+				       unsigned type,
+				       enum amdgpu_interrupt_state state)
+{
+	dc_interrupt_set(adev->dm.dc,
+			 amdgpu_dm_hpd_to_dal_irq_source(type),
+			 state == AMDGPU_IRQ_STATE_ENABLE);
+	return 0;
+}
+
+/* Common helper for the per-CRTC .set hooks (vblank, pflip): enables or
+ * disables the HW interrupt source derived from @dal_irq_type and the
+ * CRTC's OTG instance.  Returns 0 even when the CRTC lookup fails (only
+ * an error is logged). */
+static inline int dm_irq_state(struct amdgpu_device *adev,
+			       struct amdgpu_irq_src *source,
+			       unsigned crtc_id,
+			       enum amdgpu_interrupt_state state,
+			       const enum irq_type dal_irq_type,
+			       const char *func)
+{
+	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
+	enum dc_irq_source irq_source;
+
+	if (!acrtc) {
+		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
+		return 0;
+	}
+
+	/* The per-OTG source id is the base type offset by the OTG instance. */
+	irq_source = dal_irq_type + acrtc->otg_inst;
+
+	dc_interrupt_set(adev->dm.dc, irq_source,
+			 state == AMDGPU_IRQ_STATE_ENABLE);
+	return 0;
+}
+
+/* .set hook for the page-flip irq source. */
+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
+					 struct amdgpu_irq_src *source,
+					 unsigned crtc_id,
+					 enum amdgpu_interrupt_state state)
+{
+	return dm_irq_state(adev, source, crtc_id, state, IRQ_TYPE_PFLIP,
+			    __func__);
+}
+
+/* .set hook for the CRTC (vblank) irq source. */
+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned crtc_id,
+					enum amdgpu_interrupt_state state)
+{
+	return dm_irq_state(adev, source, crtc_id, state, IRQ_TYPE_VBLANK,
+			    __func__);
+}
+
+/* IRQ source hook tables: .set toggles the HW interrupt state, .process is
+ * the shared entry point that dispatches an IV ring entry to DM handlers. */
+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
+	.set = amdgpu_dm_set_crtc_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
+	.set = amdgpu_dm_set_pflip_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
+	.set = amdgpu_dm_set_hpd_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+/* Hook the DM irq source tables into the amdgpu device. */
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
+
+	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
+}
+
+/*
+ * amdgpu_dm_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		const struct dc_link *link =
+				to_amdgpu_dm_connector(connector)->dc_link;
+
+		/* Enable only the HPD/HPD-RX sources this link provides. */
+		if (link->irq_source_hpd != DC_IRQ_SOURCE_INVALID)
+			dc_interrupt_set(adev->dm.dc,
+					link->irq_source_hpd,
+					true);
+
+		if (link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID)
+			dc_interrupt_set(adev->dm.dc,
+					link->irq_source_hpd_rx,
+					true);
+	}
+}
+
+/**
+ * amdgpu_dm_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_dm_connector *amdgpu_dm_connector =
+				to_amdgpu_dm_connector(connector);
+		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+		/* Mirror amdgpu_dm_hpd_init(): guard both sources so
+		 * DC_IRQ_SOURCE_INVALID is never handed to
+		 * dc_interrupt_set(). */
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+			dc_interrupt_set(adev->dm.dc,
+					dc_link->irq_source_hpd,
+					false);
+		}
+
+		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+			dc_interrupt_set(adev->dm.dc,
+					dc_link->irq_source_hpd_rx,
+					false);
+		}
+	}
+}

+ 102 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h

@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ *	0 - success
+ *	non-zero - error
+ */
+int amdgpu_dm_irq_init(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq handler function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ * 	IRQ Handler Index on success.
+ * 	NULL on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+				       struct dc_interrupt_params *int_params,
+				       void (*ih)(void *),
+				       void *handler_args);
+
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ *	by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @ih_index: irq handler index which was returned by
+ *	amdgpu_dm_irq_register_interrupt
+ */
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+					enum dc_irq_source irq_source,
+					void *ih_index);
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
+
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
+ *
+ */
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
+ * amdgpu_dm_irq_resume_late - enable ASIC interrupts during resume.
+ *
+ */
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_DM_IRQ_H__ */

+ 446 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c

@@ -0,0 +1,446 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/version.h>
+#include <drm/drm_atomic_helper.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "dc.h"
+#include "dm_helpers.h"
+
+#include "dc_link_ddc.h"
+
+/* #define TRACE_DPCD */
+
+#ifdef TRACE_DPCD
+#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
+
+/* Map a DPCD address inside the sideband message windows to a printable
+ * message-type name (debug-only, compiled under TRACE_DPCD).
+ *
+ * NOTE(review): returns a shared static buffer, so a second call overwrites
+ * the previous result and this is not thread-safe — acceptable for tracing,
+ * but confirm it is never used concurrently. */
+static inline char *side_band_msg_type_to_str(uint32_t address)
+{
+	static char str[10] = {0};
+
+	/* Classify against the ascending sideband window base addresses. */
+	if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
+		strcpy(str, "DOWN_REQ");
+	else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
+		strcpy(str, "UP_REP");
+	else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
+		strcpy(str, "DOWN_REP");
+	else
+		strcpy(str, "UP_REQ");
+
+	return str;
+}
+
+/* Debug trace of one DPCD transaction (compiled under TRACE_DPCD):
+ * logs direction, address, sideband classification and result, then
+ * hex-dumps the payload on success. */
+static void log_dpcd(uint8_t type,
+		     uint32_t address,
+		     uint8_t *data,
+		     uint32_t size,
+		     bool res)
+{
+	DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
+			(type == DP_AUX_NATIVE_READ) ||
+			(type == DP_AUX_I2C_READ) ?
+					"Read" : "Write",
+			address,
+			SIDE_BAND_MSG(address) ?
+					side_band_msg_type_to_str(address) : "Nop",
+			res ? "OK" : "Fail");
+
+	if (res) {
+		print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
+	}
+}
+#endif
+
+/* drm_dp_aux .transfer hook: route native/I2C-over-AUX reads and writes
+ * through the DAL DDC service.
+ *
+ * NOTE(review): @res is not propagated to the caller — failed transactions
+ * still report msg->size as transferred; confirm this is intended. */
+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+				  struct drm_dp_aux_msg *msg)
+{
+	enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+		I2C_MOT_TRUE : I2C_MOT_FALSE;
+	enum ddc_result res;
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_READ:
+		res = dal_ddc_service_read_dpcd_data(
+				TO_DM_AUX(aux)->ddc_service,
+				false,
+				I2C_MOT_UNDEF,
+				msg->address,
+				msg->buffer,
+				msg->size);
+		break;
+	case DP_AUX_NATIVE_WRITE:
+		res = dal_ddc_service_write_dpcd_data(
+				TO_DM_AUX(aux)->ddc_service,
+				false,
+				I2C_MOT_UNDEF,
+				msg->address,
+				msg->buffer,
+				msg->size);
+		break;
+	case DP_AUX_I2C_READ:
+		res = dal_ddc_service_read_dpcd_data(
+				TO_DM_AUX(aux)->ddc_service,
+				true,
+				mot,
+				msg->address,
+				msg->buffer,
+				msg->size);
+		break;
+	case DP_AUX_I2C_WRITE:
+		res = dal_ddc_service_write_dpcd_data(
+				TO_DM_AUX(aux)->ddc_service,
+				true,
+				mot,
+				msg->address,
+				msg->buffer,
+				msg->size);
+		break;
+	default:
+		return 0;
+	}
+
+#ifdef TRACE_DPCD
+	/* Fixed: previously referenced undeclared 'r', which broke the
+	 * build whenever TRACE_DPCD was defined. */
+	log_dpcd(msg->request,
+		 msg->address,
+		 msg->buffer,
+		 msg->size,
+		 res == DDC_RESULT_SUCESSFULL);
+#endif
+
+	return msg->size;
+}
+
+/* .detect hook for MST connectors: the live state of a branch port is
+ * tracked by the master connector's topology manager, so delegate there. */
+static enum drm_connector_status
+dm_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+	struct amdgpu_dm_connector *aconnector =
+			to_amdgpu_dm_connector(connector);
+
+	return drm_dp_mst_detect_port(connector,
+				      &aconnector->mst_port->mst_mgr,
+				      aconnector->port);
+}
+
+/* .destroy hook: tear down the fake MST encoder first, then the
+ * connector itself, freeing both. */
+static void
+dm_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector =
+			to_amdgpu_dm_connector(connector);
+	struct amdgpu_encoder *encoder = aconnector->mst_encoder;
+
+	drm_encoder_cleanup(&encoder->base);
+	kfree(encoder);
+	drm_connector_cleanup(connector);
+	kfree(aconnector);
+}
+
+/* drm_connector vtable for dynamically created MST connectors; state
+ * handling is shared with the regular (SST) amdgpu_dm connector hooks. */
+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
+	.detect = dm_dp_mst_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = dm_dp_mst_connector_destroy,
+	.reset = amdgpu_dm_connector_funcs_reset,
+	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+/* Populate the connector's mode list from @edid and refresh the ELD
+ * (audio capability) data derived from the same EDID.
+ * Returns the number of modes added. */
+static int dm_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int num_modes = drm_add_edid_modes(connector, edid);
+
+	drm_edid_to_eld(connector, edid);
+
+	return num_modes;
+}
+
+/* .get_modes hook for MST connectors: on first call, fetch the remote
+ * EDID through the topology manager, create the remote DC sink and attach
+ * it to the connector, then build the mode list from the cached EDID.
+ * Returns the number of modes (0 if no EDID could be fetched). */
+static int dm_dp_mst_get_modes(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	int ret = 0;
+
+	if (!aconnector)
+		return dm_connector_update_modes(connector, NULL);
+
+	if (!aconnector->edid) {
+		struct edid *edid;
+		struct dc_sink *dc_sink;
+		struct dc_sink_init_data init_params = {
+				.link = aconnector->dc_link,
+				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+		if (!edid) {
+			drm_mode_connector_update_edid_property(
+				&aconnector->base,
+				NULL);
+			return ret;
+		}
+
+		aconnector->edid = edid;
+
+		dc_sink = dc_link_add_remote_sink(
+			aconnector->dc_link,
+			(uint8_t *)edid,
+			(edid->extensions + 1) * EDID_LENGTH,
+			&init_params);
+
+		/* Bail out before dereferencing: dc_link_add_remote_sink()
+		 * can fail and return NULL (previously dc_sink->priv was
+		 * written before the NULL check). */
+		if (!dc_sink)
+			return ret;
+
+		dc_sink->priv = aconnector;
+		aconnector->dc_sink = dc_sink;
+
+		amdgpu_dm_add_sink_to_freesync_module(
+				connector, edid);
+
+		drm_mode_connector_update_edid_property(
+						&aconnector->base, edid);
+	}
+
+	ret = dm_connector_update_modes(connector, aconnector->edid);
+
+	return ret;
+}
+
+/* Every MST connector drives its own dedicated fake encoder. */
+static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+{
+	return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+}
+
+/* Helper vtable for MST connectors; mode validation is shared with the
+ * regular amdgpu_dm connector path. */
+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
+	.get_modes = dm_dp_mst_get_modes,
+	.mode_valid = amdgpu_dm_connector_mode_valid,
+	.best_encoder = dm_mst_best_encoder,
+};
+
+/* Free a fake MST encoder created by dm_dp_create_fake_mst_encoder(). */
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+	.destroy = amdgpu_dm_encoder_destroy,
+};
+
+/* Allocate and register the fake encoder that backs an MST connector.
+ * Returns NULL on allocation or encoder-init failure. */
+static struct amdgpu_encoder *
+dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder;
+	struct drm_encoder *encoder;
+	const struct drm_connector_helper_funcs *connector_funcs =
+		connector->base.helper_private;
+	struct drm_encoder *enc_master =
+		connector_funcs->best_encoder(&connector->base);
+
+	DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
+	if (!amdgpu_encoder)
+		return NULL;
+
+	encoder = &amdgpu_encoder->base;
+	encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+	/* Check the init result: on failure the encoder was never added to
+	 * the mode config and must not be used (previously ignored). */
+	if (drm_encoder_init(
+		dev,
+		&amdgpu_encoder->base,
+		&amdgpu_dm_encoder_funcs,
+		DRM_MODE_ENCODER_DPMST,
+		NULL)) {
+		kfree(amdgpu_encoder);
+		return NULL;
+	}
+
+	drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+
+	return amdgpu_encoder;
+}
+
+/* Topology-manager .add_connector callback: return a connector for a newly
+ * discovered MST branch port, reusing an existing port-less connector of
+ * the same master where possible, otherwise creating a fresh one with its
+ * fake encoder.  Returns NULL on allocation/init failure. */
+static struct drm_connector *
+dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+			struct drm_dp_mst_port *port,
+			const char *pathprop)
+{
+	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+
+	/* First look for a connector of this master whose port was cleared
+	 * by dm_dp_destroy_mst_connector() and can be rebound. */
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->mst_port == master
+				&& !aconnector->port) {
+			DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
+						aconnector, connector->base.id, aconnector->mst_port);
+
+			aconnector->port = port;
+			drm_mode_connector_set_path_property(connector, pathprop);
+
+			drm_connector_list_iter_end(&conn_iter);
+			return &aconnector->base;
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+	if (!aconnector)
+		return NULL;
+
+	connector = &aconnector->base;
+	aconnector->port = port;
+	aconnector->mst_port = master;
+
+	if (drm_connector_init(
+		dev,
+		connector,
+		&dm_dp_mst_connector_funcs,
+		DRM_MODE_CONNECTOR_DisplayPort)) {
+		kfree(aconnector);
+		return NULL;
+	}
+	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
+
+	/* Shares the master's dc_link and connector id. */
+	amdgpu_dm_connector_init_helper(
+		&adev->dm,
+		aconnector,
+		DRM_MODE_CONNECTOR_DisplayPort,
+		master->dc_link,
+		master->connector_id);
+
+	/* NOTE(review): mst_encoder may be NULL on allocation failure and
+	 * is not checked here -- confirm downstream users tolerate that. */
+	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+
+	/*
+	 * TODO: understand why this one is needed
+	 */
+	drm_object_attach_property(
+		&connector->base,
+		dev->mode_config.path_property,
+		0);
+	drm_object_attach_property(
+		&connector->base,
+		dev->mode_config.tile_property,
+		0);
+
+	drm_mode_connector_set_path_property(connector, pathprop);
+
+	/*
+	 * Initialize connector state before adding the connector to drm and
+	 * framebuffer lists
+	 */
+	amdgpu_dm_connector_funcs_reset(connector);
+
+	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
+			aconnector, connector->base.id, aconnector->mst_port);
+
+	DRM_DEBUG_KMS(":%d\n", connector->base.id);
+
+	return connector;
+}
+
+/* Topology-manager .destroy_connector callback: detach the branch port,
+ * drop the remote DC sink and cached EDID, and clear the EDID property.
+ * The connector object itself is reused or destroyed elsewhere. */
+static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+					struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
+				aconnector, connector->base.id, aconnector->mst_port);
+
+	aconnector->port = NULL;
+	if (aconnector->dc_sink) {
+		amdgpu_dm_remove_sink_from_freesync_module(connector);
+		dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+		dc_sink_release(aconnector->dc_sink);
+		aconnector->dc_sink = NULL;
+	}
+
+	/* kfree(NULL) is a no-op, so the previous NULL guard was redundant. */
+	kfree(aconnector->edid);
+	aconnector->edid = NULL;
+
+	drm_mode_connector_update_edid_property(
+			&aconnector->base,
+			NULL);
+}
+
+/* Topology change notification: report it to userspace as a hotplug. */
+static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct amdgpu_dm_connector *master =
+		container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+	drm_kms_helper_hotplug_event(master->base.dev);
+}
+
+/* Register a new MST connector with userspace and, when present, with
+ * the fbdev helper. */
+static void dm_dp_mst_register_connector(struct drm_connector *connector)
+{
+	struct amdgpu_device *adev = connector->dev->dev_private;
+
+	if (adev->mode_info.rfbdev)
+		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper,
+						connector);
+	else
+		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
+
+	drm_connector_register(connector);
+}
+
+/* Callbacks handed to the DRM MST topology manager. */
+static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+	.add_connector = dm_dp_add_mst_connector,
+	.destroy_connector = dm_dp_destroy_mst_connector,
+	.hotplug = dm_dp_mst_hotplug,
+	.register_connector = dm_dp_mst_register_connector
+};
+
+/* Wire a DP connector up for MST: register its AUX channel and initialize
+ * the topology manager with dm_mst_cbs. */
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+				       struct amdgpu_dm_connector *aconnector)
+{
+	aconnector->dm_dp_aux.aux.name = "dmdc";
+	aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
+	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
+
+	drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+	aconnector->mst_mgr.cbs = &dm_mst_cbs;
+	/* NOTE(review): 16 = max DPCD transaction bytes, 4 = max sideband
+	 * message transactions -- per drm_dp_mst_topology_mgr_init(); the
+	 * return value is not checked here. */
+	drm_dp_mst_topology_mgr_init(
+		&aconnector->mst_mgr,
+		dm->adev->ddev,
+		&aconnector->dm_dp_aux.aux,
+		16,
+		4,
+		aconnector->connector_id);
+}
+

+ 35 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h

@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
+#define __DAL_AMDGPU_DM_MST_TYPES_H__
+
+struct amdgpu_display_manager;
+struct amdgpu_dm_connector;
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+				       struct amdgpu_dm_connector *aconnector);
+
+#endif

+ 379 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c

@@ -0,0 +1,379 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+
+/* DM service: return a timestamp for DC logging/profiling.
+ * Stub for now -- always returns 0 until wired to a real clock source. */
+unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+	/* TODO: return actual timestamp */
+	return 0;
+}
+
+/* DM service: persist per-sink, per-module key/value data.
+ * Stub: no persistence backend yet, so the write always reports failure. */
+bool dm_write_persistent_data(struct dc_context *ctx,
+		const struct dc_sink *sink,
+		const char *module_name,
+		const char *key_name,
+		void *params,
+		unsigned int size,
+		struct persistent_data_flag *flag)
+{
+	/*TODO implement*/
+	return false;
+}
+
+/* DM service: read back data stored by dm_write_persistent_data().
+ * Stub: no persistence backend yet, so the read always reports failure. */
+bool dm_read_persistent_data(struct dc_context *ctx,
+				const struct dc_sink *sink,
+				const char *module_name,
+				const char *key_name,
+				void *params,
+				unsigned int size,
+				struct persistent_data_flag *flag)
+{
+	/*TODO implement*/
+	return false;
+}
+
+/**** power component interfaces ****/
+
+/* Pre-notify powerplay of an upcoming DCE clock change.
+ * Stub: not implemented; always returns false so callers can fall back. */
+bool dm_pp_pre_dce_clock_change(
+		struct dc_context *ctx,
+		struct dm_pp_gpu_clock_range *requested_state,
+		struct dm_pp_gpu_clock_range *actual_state)
+{
+	/*TODO*/
+	return false;
+}
+
+/* Translate DC's display configuration into amdgpu's pm_display_cfg and
+ * hand it to powerplay so DPM can honor the display's clock/latency needs.
+ *
+ * Only does work when DPM is enabled; otherwise it is a successful no-op.
+ * Clock fields are divided by 10 because pm_display_cfg stores clocks in
+ * 10 kHz units while DC supplies kHz (see the matching "*10" translations
+ * in pp_to_dc_clock_levels()).  Always returns true.
+ */
+bool dm_pp_apply_display_requirements(
+		const struct dc_context *ctx,
+		const struct dm_pp_display_configuration *pp_display_cfg)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+
+	if (adev->pm.dpm_enabled) {
+
+		memset(&adev->pm.pm_display_cfg, 0,
+				sizeof(adev->pm.pm_display_cfg));
+
+		adev->pm.pm_display_cfg.cpu_cc6_disable =
+			pp_display_cfg->cpu_cc6_disable;
+
+		adev->pm.pm_display_cfg.cpu_pstate_disable =
+			pp_display_cfg->cpu_pstate_disable;
+
+		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+			pp_display_cfg->cpu_pstate_separation_time;
+
+		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+			pp_display_cfg->nb_pstate_switch_disable;
+
+		/* non-display paths are not tracked; report display count twice */
+		adev->pm.pm_display_cfg.num_display =
+				pp_display_cfg->display_count;
+		adev->pm.pm_display_cfg.num_path_including_non_display =
+				pp_display_cfg->display_count;
+
+		/* kHz -> 10 kHz units expected by powerplay */
+		adev->pm.pm_display_cfg.min_core_set_clock =
+				pp_display_cfg->min_engine_clock_khz/10;
+		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+		adev->pm.pm_display_cfg.min_mem_set_clock =
+				pp_display_cfg->min_memory_clock_khz/10;
+
+		adev->pm.pm_display_cfg.multi_monitor_in_sync =
+				pp_display_cfg->all_displays_in_sync;
+		adev->pm.pm_display_cfg.min_vblank_time =
+				pp_display_cfg->avail_mclk_switch_time_us;
+
+		adev->pm.pm_display_cfg.display_clk =
+				pp_display_cfg->disp_clk_khz/10;
+
+		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+		adev->pm.pm_display_cfg.line_time_in_us =
+				pp_display_cfg->line_time_in_us;
+
+		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+		adev->pm.pm_display_cfg.crossfire_display_index = -1;
+		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+		/* TODO: complete implementation of
+		 * amd_powerplay_display_configuration_change().
+		 * Follow example of:
+		 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+		 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+		amd_powerplay_display_configuration_change(
+				adev->powerplay.pp_handle,
+				&adev->pm.pm_display_cfg);
+
+		/* TODO: replace by a separate call to 'apply display cfg'? */
+		amdgpu_pm_compute_clocks(adev);
+	}
+
+	return true;
+}
+
+/* Report the system memory/engine clock ranges (in kHz) to DC.
+ *
+ * When DPM is enabled the ranges are queried from amdgpu_dpm_get_*clk()
+ * (false = max level, true = min level); otherwise hard-coded defaults
+ * are returned so DC can still operate without PPLib.  Always returns true.
+ */
+bool dc_service_get_system_clocks_range(
+		const struct dc_context *ctx,
+		struct dm_pp_gpu_clock_range *sys_clks)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+
+	/* Default values, in case PPLib is not compiled-in. */
+	sys_clks->mclk.max_khz = 800000;
+	sys_clks->mclk.min_khz = 800000;
+
+	sys_clks->sclk.max_khz = 600000;
+	sys_clks->sclk.min_khz = 300000;
+
+	if (adev->pm.dpm_enabled) {
+		sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
+		sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
+
+		sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
+		sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
+	}
+
+	return true;
+}
+
+/* Fill *clks with canned clock-level tables (in kHz) for the given clock
+ * type.  Used as a fallback when PPLib fails to report real levels.
+ * Unknown clock types yield zero levels. */
+static void get_default_clock_levels(
+		enum dm_pp_clock_type clk_type,
+		struct dm_pp_clock_levels *clks)
+{
+	uint32_t disp_clks_in_khz[6] = {
+			300000, 400000, 496560, 626090, 685720, 757900 };
+	uint32_t sclks_in_khz[6] = {
+			300000, 360000, 423530, 514290, 626090, 720000 };
+	uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+	switch (clk_type) {
+	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+		clks->num_levels = 6;
+		memmove(clks->clocks_in_khz, disp_clks_in_khz,
+				sizeof(disp_clks_in_khz));
+		break;
+	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+		clks->num_levels = 6;
+		memmove(clks->clocks_in_khz, sclks_in_khz,
+				sizeof(sclks_in_khz));
+		break;
+	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+		clks->num_levels = 2;
+		memmove(clks->clocks_in_khz, mclks_in_khz,
+				sizeof(mclks_in_khz));
+		break;
+	default:
+		clks->num_levels = 0;
+		break;
+	}
+}
+
+/* Map a DM clock-type enum onto the powerplay equivalent.
+ * Unknown types log an error and return 0 (the initializer value). */
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+		enum dm_pp_clock_type dm_pp_clk_type)
+{
+	enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+	switch (dm_pp_clk_type) {
+	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+		amd_pp_clk_type = amd_pp_disp_clock;
+		break;
+	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+		amd_pp_clk_type = amd_pp_sys_clock;
+		break;
+	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+		amd_pp_clk_type = amd_pp_mem_clock;
+		break;
+	default:
+		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+				dm_pp_clk_type);
+		break;
+	}
+
+	return amd_pp_clk_type;
+}
+
+/* Copy powerplay clock levels into DC's representation.
+ *
+ * The level count is clamped to DM_PP_MAX_CLOCK_LEVELS (with a warning)
+ * and each clock value is converted from powerplay's 10 kHz units to kHz.
+ */
+static void pp_to_dc_clock_levels(
+		const struct amd_pp_clocks *pp_clks,
+		struct dm_pp_clock_levels *dc_clks,
+		enum dm_pp_clock_type dc_clk_type)
+{
+	uint32_t i;
+
+	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+				pp_clks->count,
+				DM_PP_MAX_CLOCK_LEVELS);
+
+		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+	} else
+		dc_clks->num_levels = pp_clks->count;
+
+	DRM_INFO("DM_PPLIB: values for %s clock\n",
+			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+	for (i = 0; i < dc_clks->num_levels; i++) {
+		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+		/* translate 10kHz to kHz */
+		dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
+	}
+}
+
+/* Query PPLib for the clock levels of one clock type and trim the list to
+ * non-boosted levels.
+ *
+ * Falls back to get_default_clock_levels() if PPLib cannot report levels,
+ * and to hard-coded validation clocks if the validation query fails.
+ * Engine/memory level lists are then truncated at the first level that
+ * exceeds the corresponding validation clock (always keeping at least one
+ * level).  Always returns true.
+ */
+bool dm_pp_get_clock_levels_by_type(
+		const struct dc_context *ctx,
+		enum dm_pp_clock_type clk_type,
+		struct dm_pp_clock_levels *dc_clks)
+{
+	struct amdgpu_device *adev = ctx->driver_context;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct amd_pp_clocks pp_clks = { 0 };
+	struct amd_pp_simple_clock_info validation_clks = { 0 };
+	uint32_t i;
+
+	if (amd_powerplay_get_clock_by_type(pp_handle,
+			dc_to_pp_clock_type(clk_type), &pp_clks)) {
+		/* Error in pplib. Provide default values. */
+		get_default_clock_levels(clk_type, dc_clks);
+		return true;
+	}
+
+	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+	if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
+			&validation_clks)) {
+		/* Error in pplib. Provide default values. */
+		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+		validation_clks.engine_max_clock = 72000;
+		validation_clks.memory_max_clock = 80000;
+		validation_clks.level = 0;
+	}
+
+	DRM_INFO("DM_PPLIB: Validation clocks:\n");
+	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
+			validation_clks.engine_max_clock);
+	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
+			validation_clks.memory_max_clock);
+	DRM_INFO("DM_PPLIB:    level           : %d\n",
+			validation_clks.level);
+
+	/* Translate 10 kHz to kHz. */
+	validation_clks.engine_max_clock *= 10;
+	validation_clks.memory_max_clock *= 10;
+
+	/* Determine the highest non-boosted level from the Validation Clocks */
+	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+		for (i = 0; i < dc_clks->num_levels; i++) {
+			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+				/* This clock is higher than the validation
+				 * clock.  That means the previous one is the
+				 * highest non-boosted one. */
+				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+						dc_clks->num_levels, i);
+				dc_clks->num_levels = i > 0 ? i : 1;
+				break;
+			}
+		}
+	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+		for (i = 0; i < dc_clks->num_levels; i++) {
+			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+						dc_clks->num_levels, i);
+				dc_clks->num_levels = i > 0 ? i : 1;
+				break;
+			}
+		}
+	}
+
+	return true;
+}
+
+/* The remaining powerplay interface hooks are unimplemented stubs; each
+ * returns false so DC callers know the query/request was not serviced. */
+
+/* Stub: clock levels annotated with latency are not yet available. */
+bool dm_pp_get_clock_levels_by_type_with_latency(
+	const struct dc_context *ctx,
+	enum dm_pp_clock_type clk_type,
+	struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/* Stub: clock levels annotated with voltage are not yet available. */
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+	const struct dc_context *ctx,
+	enum dm_pp_clock_type clk_type,
+	struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/* Stub: watermark clock-range notification is not yet forwarded to PPLib. */
+bool dm_pp_notify_wm_clock_changes(
+	const struct dc_context *ctx,
+	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/* Stub: power-level change requests are not yet forwarded to PPLib. */
+bool dm_pp_apply_power_level_change_request(
+	const struct dc_context *ctx,
+	struct dm_pp_power_level_change_request *level_change_req)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/* Stub: clock-for-voltage requests are not yet forwarded to PPLib. */
+bool dm_pp_apply_clock_for_voltage_request(
+	const struct dc_context *ctx,
+	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/* Stub: static clock info is not yet available from PPLib. */
+bool dm_pp_get_static_clocks(
+	const struct dc_context *ctx,
+	struct dm_pp_static_clock_info *static_clk_info)
+{
+	/* TODO: to be implemented */
+	return false;
+}
+
+/* Stub: Raven SMU function table is not populated yet (no-op). */
+void dm_pp_get_funcs_rv(
+		struct dc_context *ctx,
+		struct pp_smu_funcs_rv *funcs)
+{}
+
+/**** end of power component interfaces ****/

+ 33 - 0
drivers/gpu/drm/amd/display/dc/Makefile

@@ -0,0 +1,33 @@
+#
+# Makefile for Display Core (dc) component.
+#
+
+DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+DC_LIBS += dcn10 dml
+endif
+
+DC_LIBS += dce120
+
+DC_LIBS += dce112
+DC_LIBS += dce110
+DC_LIBS += dce100
+DC_LIBS += dce80
+
+AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
+
+include $(AMD_DC)
+
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+
+AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
+
+AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
+
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
+AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
+
+
+

+ 11 - 0
drivers/gpu/drm/amd/display/dc/basics/Makefile

@@ -0,0 +1,11 @@
+#
+# Makefile for the 'utils' sub-component of DAL.
+# It provides the general basic services required by other DAL
+# subcomponents.
+
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+	logger.o log_helpers.o vector.o
+
+AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)

+ 104 - 0
drivers/gpu/drm/amd/display/dc/basics/conversion.c

@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#define DIVIDER 10000
+
+/* S2D13 value in [-3.00...0.9999] */
+#define S2D13_MIN (-3 * DIVIDER)
+#define S2D13_MAX (3 * DIVIDER)
+
+/* Convert a fixed31_32 value into a hardware sign-magnitude fixed-point
+ * word with the given integer and fractional bit widths (e.g. S2D13).
+ *
+ * Values that overflow the integer field are clamped to the largest
+ * representable magnitude; the sign bit (bit integer_bits+fractional_bits)
+ * is set for negative non-zero results.
+ */
+uint16_t fixed_point_to_int_frac(
+	struct fixed31_32 arg,
+	uint8_t integer_bits,
+	uint8_t fractional_bits)
+{
+	int32_t numerator;
+	int32_t divisor = 1 << fractional_bits;
+
+	uint16_t result;
+
+	uint16_t d = (uint16_t)dal_fixed31_32_floor(
+		dal_fixed31_32_abs(
+			arg));
+
+	/* NOTE(review): (1 / (uint16_t)divisor) is integer division and is 0
+	 * for any fractional_bits > 0, so this compares d <= (1<<integer_bits).
+	 * Presumably a fractional epsilon was intended -- confirm against the
+	 * HW register spec before changing. */
+	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+		numerator = (uint16_t)dal_fixed31_32_floor(
+			dal_fixed31_32_mul_int(
+				arg,
+				divisor));
+	else {
+		/* overflow: clamp to max magnitude = 2^integer_bits - 1/divisor */
+		numerator = dal_fixed31_32_floor(
+			dal_fixed31_32_sub(
+				dal_fixed31_32_from_int(
+					1LL << integer_bits),
+				dal_fixed31_32_recip(
+					dal_fixed31_32_from_int(
+						divisor))));
+	}
+
+	if (numerator >= 0)
+		result = (uint16_t)numerator;
+	else
+		result = (uint16_t)(
+		(1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+	/* set the sign bit for negative, non-zero results */
+	if ((result != 0) && dal_fixed31_32_lt(
+		arg, dal_fixed31_32_zero))
+		result |= 1 << (integer_bits + fractional_bits);
+
+	return result;
+}
+/**
+ * convert_float_matrix
+ * Converts an array of fixed31_32 values into the HW register spec defined
+ * format S2D13 (2 integer bits, 13 fractional bits, sign-magnitude).
+ * Each input is first clamped to the representable S2D13 range
+ * [-3.0, 3.0) before conversion.
+ * @param matrix: output array of buffer_size S2D13 register words
+ * @param flt: input array of buffer_size fixed31_32 values
+ * @param buffer_size: number of elements to convert
+ * @return None
+ */
+void convert_float_matrix(
+	uint16_t *matrix,
+	struct fixed31_32 *flt,
+	uint32_t buffer_size)
+{
+	const struct fixed31_32 min_2_13 =
+		dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
+	const struct fixed31_32 max_2_13 =
+		dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
+	uint32_t i;
+
+	for (i = 0; i < buffer_size; ++i) {
+		uint32_t reg_value =
+				fixed_point_to_int_frac(
+					dal_fixed31_32_clamp(
+						flt[i],
+						min_2_13,
+						max_2_13),
+						2,
+						13);
+
+		matrix[i] = (uint16_t)reg_value;
+	}
+}

+ 46 - 0
drivers/gpu/drm/amd/display/dc/basics/conversion.h

@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_CONVERSION_H__
+#define __DAL_CONVERSION_H__
+
+#include "include/fixed31_32.h"
+
+uint16_t fixed_point_to_int_frac(
+	struct fixed31_32 arg,
+	uint8_t integer_bits,
+	uint8_t fractional_bits);
+
+void convert_float_matrix(
+	uint16_t *matrix,
+	struct fixed31_32 *flt,
+	uint32_t buffer_size);
+
+/* Integer base-2 logarithm (floor); thin wrapper around the kernel's ilog2(). */
+static inline unsigned int log_2(unsigned int num)
+{
+	return ilog2(num);
+}
+
+#endif

+ 567 - 0
drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c

@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed31_32.h"
+
+/* Absolute value of a signed 64-bit integer, returned as unsigned. */
+static inline uint64_t abs_i64(
+	int64_t arg)
+{
+	if (arg > 0)
+		return (uint64_t)arg;
+	else
+		return (uint64_t)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ *
+ * Asserts on a zero divisor; division is delegated to div64_u64_rem()
+ * (required for 64-bit division on 32-bit kernels).
+ */
+static inline uint64_t complete_integer_division_u64(
+	uint64_t dividend,
+	uint64_t divisor,
+	uint64_t *remainder)
+{
+	uint64_t result;
+
+	ASSERT(divisor);
+
+	result = div64_u64_rem(dividend, divisor, remainder);
+
+	return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+	((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+	((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+	(FRACTIONAL_PART_MASK & (x))
+
+/* Build a fixed31_32 value equal to numerator / denominator.
+ *
+ * The division is done on magnitudes: the integer part comes from a 64-bit
+ * division, the 32 fractional bits are produced by bitwise long division on
+ * the remainder, the LSB is rounded to nearest, and the sign is applied at
+ * the end (negative iff exactly one operand is negative).
+ */
+struct fixed31_32 dal_fixed31_32_from_fraction(
+	int64_t numerator,
+	int64_t denominator)
+{
+	struct fixed31_32 res;
+
+	bool arg1_negative = numerator < 0;
+	bool arg2_negative = denominator < 0;
+
+	uint64_t arg1_value = arg1_negative ? -numerator : numerator;
+	uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+
+	uint64_t remainder;
+
+	/* determine integer part */
+
+	uint64_t res_value = complete_integer_division_u64(
+		arg1_value, arg2_value, &remainder);
+
+	ASSERT(res_value <= LONG_MAX);
+
+	/* determine fractional part: 32 rounds of bitwise long division */
+	{
+		uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+		do {
+			remainder <<= 1;
+
+			res_value <<= 1;
+
+			if (remainder >= arg2_value) {
+				res_value |= 1;
+				remainder -= arg2_value;
+			}
+		} while (--i != 0);
+	}
+
+	/* round up LSB: add 1 when the remaining fraction is >= 1/2 */
+	{
+		uint64_t summand = (remainder << 1) >= arg2_value;
+
+		ASSERT(res_value <= LLONG_MAX - summand);
+
+		res_value += summand;
+	}
+
+	res.value = (int64_t)res_value;
+
+	/* apply sign: negative iff operand signs differ */
+	if (arg1_negative ^ arg2_negative)
+		res.value = -res.value;
+
+	return res;
+}
+
+/* Build a fixed31_32 from a plain integer (non-constant-expression path).
+ * Asserts the value fits in the 31-bit integer field.
+ * NOTE(review): arg << 32 on a negative arg is formally UB in C; kernel
+ * builds rely on two's-complement shift semantics here -- confirm. */
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(
+	int64_t arg)
+{
+	struct fixed31_32 res;
+
+	ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+
+	res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+	return res;
+}
+
+/* Multiply a fixed31_32 by 2^shift via left shift.
+ * Asserts that the shifted value does not overflow in either direction. */
+struct fixed31_32 dal_fixed31_32_shl(
+	struct fixed31_32 arg,
+	uint8_t shift)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+		((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+	res.value = arg.value << shift;
+
+	return res;
+}
+
+/* Fixed-point addition; the ASSERT guards against signed overflow of the
+ * underlying 64-bit representation. */
+struct fixed31_32 dal_fixed31_32_add(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+		((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+	res.value = arg1.value + arg2.value;
+
+	return res;
+}
+
+/* Fixed-point subtraction (arg1 - arg2); the ASSERT guards against signed
+ * overflow of the underlying 64-bit representation. */
+struct fixed31_32 dal_fixed31_32_sub(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+		((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+	res.value = arg1.value - arg2.value;
+
+	return res;
+}
+
+/* Fixed-point multiplication.
+ *
+ * Computed on magnitudes as four partial products so no intermediate
+ * exceeds 64 bits:
+ *   int*int (shifted up), int*fra twice (cross terms), and fra*fra
+ * (shifted down with round-to-nearest).  The sign is applied last.
+ */
+struct fixed31_32 dal_fixed31_32_mul(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	bool arg1_negative = arg1.value < 0;
+	bool arg2_negative = arg2.value < 0;
+
+	uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
+	uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+	uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+	uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+	uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+	uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+	uint64_t tmp;
+
+	res.value = arg1_int * arg2_int;
+
+	ASSERT(res.value <= LONG_MAX);
+
+	res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+	tmp = arg1_int * arg2_fra;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	tmp = arg2_int * arg1_fra;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	tmp = arg1_fra * arg2_fra;
+
+	/* drop the low 32 bits of fra*fra, rounding to nearest */
+	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+		(tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	if (arg1_negative ^ arg2_negative)
+		res.value = -res.value;
+
+	return res;
+}
+
+/* Fixed-point square of arg (result is always non-negative).
+ *
+ * Same partial-product scheme as dal_fixed31_32_mul(), specialized for
+ * arg*arg: int*int, the int*fra cross term (added twice, since a square
+ * has 2*int*fra), and fra*fra rounded to nearest.
+ */
+struct fixed31_32 dal_fixed31_32_sqr(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 res;
+
+	uint64_t arg_value = abs_i64(arg.value);
+
+	uint64_t arg_int = GET_INTEGER_PART(arg_value);
+
+	uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+	uint64_t tmp;
+
+	res.value = arg_int * arg_int;
+
+	ASSERT(res.value <= LONG_MAX);
+
+	res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+	tmp = arg_int * arg_fra;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	/* intentional second add of the same tmp: the 2*int*fra cross term */
+	res.value += tmp;
+
+	tmp = arg_fra * arg_fra;
+
+	/* drop the low 32 bits of fra*fra, rounding to nearest */
+	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+		(tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+	res.value += tmp;
+
+	return res;
+}
+
+/* Reciprocal 1/arg, computed as the fraction one/arg.value.
+ * Asserts arg is non-zero. */
+struct fixed31_32 dal_fixed31_32_recip(
+	struct fixed31_32 arg)
+{
+	/*
+	 * @note
+	 * Good idea to use Newton's method
+	 */
+
+	ASSERT(arg.value);
+
+	return dal_fixed31_32_from_fraction(
+		dal_fixed31_32_one.value,
+		arg.value);
+}
+
+/* sinc(arg) = sin(arg)/arg, via a Horner-evaluated Taylor series.
+ *
+ * Arguments with |arg| >= 2*pi are first reduced modulo 2*pi; the final
+ * correction rescales the series result from the reduced argument back to
+ * sin(arg)/arg.
+ */
+struct fixed31_32 dal_fixed31_32_sinc(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 square;
+
+	struct fixed31_32 res = dal_fixed31_32_one;
+
+	int32_t n = 27;
+
+	struct fixed31_32 arg_norm = arg;
+
+	/* range-reduce |arg| >= 2*pi by subtracting whole periods */
+	if (dal_fixed31_32_le(
+		dal_fixed31_32_two_pi,
+		dal_fixed31_32_abs(arg))) {
+		arg_norm = dal_fixed31_32_sub(
+			arg_norm,
+			dal_fixed31_32_mul_int(
+				dal_fixed31_32_two_pi,
+				(int32_t)div64_s64(
+					arg_norm.value,
+					dal_fixed31_32_two_pi.value)));
+	}
+
+	square = dal_fixed31_32_sqr(arg_norm);
+
+	/* Horner evaluation of the sin(x)/x Taylor series */
+	do {
+		res = dal_fixed31_32_sub(
+			dal_fixed31_32_one,
+			dal_fixed31_32_div_int(
+				dal_fixed31_32_mul(
+					square,
+					res),
+				n * (n - 1)));
+
+		n -= 2;
+	} while (n > 2);
+
+	/* rescale sin(arg_norm)/arg_norm to sin(arg)/arg when reduced */
+	if (arg.value != arg_norm.value)
+		res = dal_fixed31_32_div(
+			dal_fixed31_32_mul(res, arg_norm),
+			arg);
+
+	return res;
+}
+
+/* sin(arg) = arg * sinc(arg). */
+struct fixed31_32 dal_fixed31_32_sin(
+	struct fixed31_32 arg)
+{
+	return dal_fixed31_32_mul(
+		arg,
+		dal_fixed31_32_sinc(arg));
+}
+
+/* cos(arg) via a Horner-evaluated Taylor series.
+ * Unlike sinc(), no range reduction is performed yet, so accuracy degrades
+ * for large |arg|. */
+struct fixed31_32 dal_fixed31_32_cos(
+	struct fixed31_32 arg)
+{
+	/* TODO implement argument normalization */
+
+	const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+
+	struct fixed31_32 res = dal_fixed31_32_one;
+
+	int32_t n = 26;
+
+	do {
+		res = dal_fixed31_32_sub(
+			dal_fixed31_32_one,
+			dal_fixed31_32_div_int(
+				dal_fixed31_32_mul(
+					square,
+					res),
+				n * (n - 1)));
+
+		n -= 2;
+	} while (n != 0);
+
+	return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as a Horner-evaluated Taylor series (9 terms).
+ */
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+	struct fixed31_32 arg)
+{
+	uint32_t n = 9;
+
+	struct fixed31_32 res = dal_fixed31_32_from_fraction(
+		n + 2,
+		n + 1);
+	/* TODO find correct res */
+
+	/* the series only converges acceptably for |arg| < 1 */
+	ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+
+	do
+		res = dal_fixed31_32_add(
+			dal_fixed31_32_one,
+			dal_fixed31_32_div_int(
+				dal_fixed31_32_mul(
+					arg,
+					res),
+				n));
+	while (--n != 1);
+
+	return dal_fixed31_32_add(
+		dal_fixed31_32_one,
+		dal_fixed31_32_mul(
+			arg,
+			res));
+}
+
+/* exp(arg) for arbitrary fixed31_32 arguments. */
+struct fixed31_32 dal_fixed31_32_exp(
+	struct fixed31_32 arg)
+{
+	/*
+	 * @brief
+	 * Main equation is:
+	 * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+	 * where m = round(x / ln(2)), r = x - m * ln(2)
+	 */
+
+	if (dal_fixed31_32_le(
+		dal_fixed31_32_ln2_div_2,
+		dal_fixed31_32_abs(arg))) {
+		int32_t m = dal_fixed31_32_round(
+			dal_fixed31_32_div(
+				arg,
+				dal_fixed31_32_ln2));
+
+		struct fixed31_32 r = dal_fixed31_32_sub(
+			arg,
+			dal_fixed31_32_mul_int(
+				dal_fixed31_32_ln2,
+				m));
+
+		ASSERT(m != 0);
+
+		/* by construction |r| <= ln(2)/2 < 1, as the series requires */
+		ASSERT(dal_fixed31_32_lt(
+			dal_fixed31_32_abs(r),
+			dal_fixed31_32_one));
+
+		/* scale by 2^m: shift left for m > 0, divide for m < 0 */
+		if (m > 0)
+			return dal_fixed31_32_shl(
+				fixed31_32_exp_from_taylor_series(r),
+				(uint8_t)m);
+		else
+			return dal_fixed31_32_div_int(
+				fixed31_32_exp_from_taylor_series(r),
+				1LL << -m);
+	} else if (arg.value != 0)
+		return fixed31_32_exp_from_taylor_series(arg);
+	else
+		return dal_fixed31_32_one;
+}
+
+/* Natural logarithm ln(arg) for arg > 0, via Newton's iteration
+ * res1 = res - 1 + arg * exp(-res), repeated until the step magnitude is
+ * at most 100 ULPs of the fixed31_32 representation. */
+struct fixed31_32 dal_fixed31_32_log(
+	struct fixed31_32 arg)
+{
+	struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+	/* TODO improve 1st estimation */
+
+	struct fixed31_32 error;
+
+	ASSERT(arg.value > 0);
+	/* TODO if arg is negative, return NaN */
+	/* TODO if arg is zero, return -INF */
+
+	do {
+		struct fixed31_32 res1 = dal_fixed31_32_add(
+			dal_fixed31_32_sub(
+				res,
+				dal_fixed31_32_one),
+			dal_fixed31_32_div(
+				arg,
+				dal_fixed31_32_exp(res)));
+
+		error = dal_fixed31_32_sub(
+			res,
+			res1);
+
+		res = res1;
+		/* TODO determine max_allowed_error based on quality of exp() */
+	} while (abs_i64(error.value) > 100ULL);
+
+	return res;
+}
+
+/* arg1 raised to the power arg2, via exp(arg2 * ln(arg1)).
+ * Requires arg1 > 0 (inherited from dal_fixed31_32_log()). */
+struct fixed31_32 dal_fixed31_32_pow(
+	struct fixed31_32 arg1,
+	struct fixed31_32 arg2)
+{
+	return dal_fixed31_32_exp(
+		dal_fixed31_32_mul(
+			dal_fixed31_32_log(arg1),
+			arg2));
+}
+
+/* Truncate toward zero and return the integer part with the input's sign.
+ * NOTE(review): for negative non-integral values this is truncation, not a
+ * mathematical floor (e.g. -1.5 -> -1); callers appear to rely on that. */
+int32_t dal_fixed31_32_floor(
+	struct fixed31_32 arg)
+{
+	uint64_t arg_value = abs_i64(arg.value);
+
+	if (arg.value >= 0)
+		return (int32_t)GET_INTEGER_PART(arg_value);
+	else
+		return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* Round the magnitude to nearest (add 0.5, then truncate) and reapply the
+ * input's sign, i.e. round-half-away-from-zero. */
+int32_t dal_fixed31_32_round(
+	struct fixed31_32 arg)
+{
+	uint64_t arg_value = abs_i64(arg.value);
+
+	const int64_t summand = dal_fixed31_32_half.value;
+
+	ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+	arg_value += summand;
+
+	if (arg.value >= 0)
+		return (int32_t)GET_INTEGER_PART(arg_value);
+	else
+		return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* Round the magnitude up (add 1 - epsilon, then truncate) and reapply the
+ * input's sign, i.e. ceiling of the magnitude, away from zero. */
+int32_t dal_fixed31_32_ceil(
+	struct fixed31_32 arg)
+{
+	uint64_t arg_value = abs_i64(arg.value);
+
+	const int64_t summand = dal_fixed31_32_one.value -
+		dal_fixed31_32_epsilon.value;
+
+	ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+	arg_value += summand;
+
+	if (arg.value >= 0)
+		return (int32_t)GET_INTEGER_PART(arg_value);
+	else
+		return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* this function is a generic helper to translate fixed point value to
+ * specified integer format that will consist of integer_bits integer part and
+ * fractional_bits fractional part. For example it is used in
+ * dal_fixed31_32_u2d19 to receive 2 bits integer part and 19 bits fractional
+ * part in 32 bits. It is used in hw programming (scaler)
+ */
+
+static inline uint32_t ux_dy(
+	int64_t value,
+	uint32_t integer_bits,
+	uint32_t fractional_bits)
+{
+	/* 1. create mask of integer part */
+	uint32_t result = (1 << integer_bits) - 1;
+	/* 2. mask out fractional part */
+	uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+	/* 3. shrink fixed point integer part to be of integer_bits width*/
+	result &= GET_INTEGER_PART(value);
+	/* 4. make space for fractional part to be filled in after integer */
+	result <<= fractional_bits;
+	/* 5. shrink fixed point fractional part to of fractional_bits width*/
+	fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+	/* 6. merge the result */
+	return result | fractional_part;
+}
+
+/* Pack a fixed31_32 into U2.19 format (2 integer bits, 19 fractional). */
+uint32_t dal_fixed31_32_u2d19(
+	struct fixed31_32 arg)
+{
+	return ux_dy(arg.value, 2, 19);
+}
+
+/* Pack a fixed31_32 into U0.19 format (fractional bits only). */
+uint32_t dal_fixed31_32_u0d19(
+	struct fixed31_32 arg)
+{
+	return ux_dy(arg.value, 0, 19);
+}

+ 161 - 0
drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c

@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed32_32.h"
+
/* 64-bit division producing a 32.32 fixed-point quotient:
 * returns round(n / d * 2^32).  Starts from the integer quotient and
 * remainder, then generates 32 fractional bits by schoolbook long
 * division.
 *
 * NOTE(review): each iteration shifts the remainder left by one bit;
 * when d > 2^63 the remainder can reach 2^63 and the shift silently
 * drops its top bit.  Callers appear to keep d small enough — verify.
 */
static uint64_t u64_div(uint64_t n, uint64_t d)
{
	uint32_t i = 0;
	uint64_t r;
	uint64_t q = div64_u64_rem(n, d, &r);

	for (i = 0; i < 32; ++i) {
		/* Bit shifted out of the quotient's top is fed into the
		 * remainder. */
		uint64_t sbit = q & (1ULL<<63);

		r <<= 1;
		r |= sbit ? 1 : 0;
		q <<= 1;
		if (r >= d) {
			r -= d;
			q |= 1;
		}
	}

	/* Round to nearest on the final fractional bit. */
	if (2*r >= d)
		q += 1;
	return q;
}
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+{
+	struct fixed32_32 fx;
+
+	fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx = {lhs.value + rhs.value};
+
+	ASSERT(fx.value >= rhs.value);
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+
+	ASSERT(fx.value >= (uint64_t)rhs << 32);
+	return fx;
+
+}
+struct fixed32_32 dal_fixed32_32_sub(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx;
+
+	ASSERT(lhs.value >= rhs.value);
+	fx.value = lhs.value - rhs.value;
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx;
+
+	ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+	fx.value = lhs.value - ((uint64_t)rhs<<32);
+	return fx;
+}
+
/* Multiply two 32.32 fixed-point values via four 32x32->64 partial
 * products (schoolbook multiplication).  The ASSERT flags overflow of
 * the integer part.
 *
 * NOTE(review): the partial products are summed in 64 bits with no
 * carry propagation, and the low 32 bits of albl are truncated (no
 * rounding) — presumably acceptable precision for callers; confirm.
 */
struct fixed32_32 dal_fixed32_32_mul(
	struct fixed32_32 lhs,
	struct fixed32_32 rhs)
{
	struct fixed32_32 fx;
	uint64_t lhs_int = lhs.value>>32;
	uint64_t lhs_frac = (uint32_t)lhs.value;
	uint64_t rhs_int = rhs.value>>32;
	uint64_t rhs_frac = (uint32_t)rhs.value;
	/* high*high, high*low, low*high, low*low partial products */
	uint64_t ahbh = lhs_int * rhs_int;
	uint64_t ahbl = lhs_int * rhs_frac;
	uint64_t albh = lhs_frac * rhs_int;
	uint64_t albl = lhs_frac * rhs_frac;

	/* Integer-part product must fit in 32 bits. */
	ASSERT((ahbh>>32) == 0);

	fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
	return fx;

}
+
+struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+	struct fixed32_32 fx;
+	uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+	uint64_t lhsf;
+
+	ASSERT((lhsi>>32) == 0);
+	lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+	ASSERT((lhsi<<32) + lhsf >= lhsf);
+	fx.value = (lhsi<<32) + lhsf;
+	return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div(
+	struct fixed32_32 lhs,
+	struct fixed32_32 rhs)
+{
+	struct fixed32_32 fx;
+
+	fx.value = u64_div(lhs.value, rhs.value);
+	return fx;
+}
+
/* Divide a 32.32 fixed-point value by an unsigned integer (rounded to
 * nearest).
 *
 * NOTE(review): rhs is promoted to a divisor of rhs << 32, so for
 * rhs >= 2^31 the remainder inside u64_div() can lose its top bit when
 * shifted — see the note on u64_div(); verify callers' ranges.
 */
struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
{
	struct fixed32_32 fx;

	fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
	return fx;
}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+{
+	ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+	return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+{
+	ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+	return (v.value + (1ULL<<31))>>32;
+}
+

+ 75 - 0
drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c

@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/grph_object_id.h"
+
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+	bool rc = true;
+
+	switch (id.type) {
+	case OBJECT_TYPE_UNKNOWN:
+		rc = false;
+		break;
+	case OBJECT_TYPE_GPU:
+	case OBJECT_TYPE_ENGINE:
+		/* do NOT check for id.id == 0 */
+		if (id.enum_id == ENUM_ID_UNKNOWN)
+			rc = false;
+		break;
+	default:
+		if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+			rc = false;
+		break;
+	}
+
+	return rc;
+}
+
+bool dal_graphics_object_id_is_equal(
+	struct graphics_object_id id1,
+	struct graphics_object_id id2)
+{
+	if (false == dal_graphics_object_id_is_valid(id1)) {
+		dm_output_to_console(
+		"%s: Warning: comparing invalid object 'id1'!\n", __func__);
+		return false;
+	}
+
+	if (false == dal_graphics_object_id_is_valid(id2)) {
+		dm_output_to_console(
+		"%s: Warning: comparing invalid object 'id2'!\n", __func__);
+		return false;
+	}
+
+	if (id1.id == id2.id && id1.enum_id == id2.enum_id
+		&& id1.type == id2.type)
+		return true;
+
+	return false;
+}
+
+

+ 102 - 0
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c

@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "logger.h"
+#include "include/logger_interface.h"
+#include "dm_helpers.h"
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+struct dc_signal_type_info {
+	enum signal_type type;
+	char name[MAX_NAME_LEN];
+};
+
+static const struct dc_signal_type_info signal_type_info_tbl[] = {
+		{SIGNAL_TYPE_NONE,             "NC"},
+		{SIGNAL_TYPE_DVI_SINGLE_LINK,  "DVI"},
+		{SIGNAL_TYPE_DVI_DUAL_LINK,    "DDVI"},
+		{SIGNAL_TYPE_HDMI_TYPE_A,      "HDMIA"},
+		{SIGNAL_TYPE_LVDS,             "LVDS"},
+		{SIGNAL_TYPE_RGB,              "VGA"},
+		{SIGNAL_TYPE_DISPLAY_PORT,     "DP"},
+		{SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
+		{SIGNAL_TYPE_EDP,              "eDP"},
+		{SIGNAL_TYPE_VIRTUAL,          "Virtual"}
+};
+
+void dc_conn_log(struct dc_context *ctx,
+		const struct dc_link *link,
+		uint8_t *hex_data,
+		int hex_data_count,
+		enum dc_log_type event,
+		const char *msg,
+		...)
+{
+	int i;
+	va_list args;
+	struct log_entry entry = { 0 };
+	enum signal_type signal;
+
+	if (link->local_sink)
+		signal = link->local_sink->sink_signal;
+	else
+		signal = link->connector_signal;
+
+	if (link->type == dc_connection_mst_branch)
+		signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+	dm_logger_open(ctx->logger, &entry, event);
+
+	for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
+		if (signal == signal_type_info_tbl[i].type)
+			break;
+
+	dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+			signal_type_info_tbl[i].name,
+			link->link_index);
+
+	va_start(args, msg);
+	entry.buf_offset += dm_log_to_buffer(
+		&entry.buf[entry.buf_offset],
+		LOG_MAX_LINE_SIZE - entry.buf_offset,
+		msg, args);
+
+	if (entry.buf[strlen(entry.buf) - 1] == '\n') {
+		entry.buf[strlen(entry.buf) - 1] = '\0';
+		entry.buf_offset--;
+	}
+
+	if (hex_data)
+		for (i = 0; i < hex_data_count; i++)
+			dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+
+	dm_logger_append(&entry, "^\n");
+	dm_helpers_dc_conn_log(ctx, &entry, event);
+	dm_logger_close(&entry);
+
+	va_end(args);
+}

+ 397 - 0
drivers/gpu/drm/amd/display/dc/basics/logger.c

@@ -0,0 +1,397 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "logger.h"
+
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct dc_log_type_info log_type_info_tbl[] = {
+		{LOG_ERROR,                 "Error"},
+		{LOG_WARNING,               "Warning"},
+		{LOG_DEBUG,		    "Debug"},
+		{LOG_DC,                    "DC_Interface"},
+		{LOG_SURFACE,               "Surface"},
+		{LOG_HW_HOTPLUG,            "HW_Hotplug"},
+		{LOG_HW_LINK_TRAINING,      "HW_LKTN"},
+		{LOG_HW_SET_MODE,           "HW_Mode"},
+		{LOG_HW_RESUME_S3,          "HW_Resume"},
+		{LOG_HW_AUDIO,              "HW_Audio"},
+		{LOG_HW_HPD_IRQ,            "HW_HPDIRQ"},
+		{LOG_MST,                   "MST"},
+		{LOG_SCALER,                "Scaler"},
+		{LOG_BIOS,                  "BIOS"},
+		{LOG_BANDWIDTH_CALCS,       "BWCalcs"},
+		{LOG_BANDWIDTH_VALIDATION,  "BWValidation"},
+		{LOG_I2C_AUX,               "I2C_AUX"},
+		{LOG_SYNC,                  "Sync"},
+		{LOG_BACKLIGHT,             "Backlight"},
+		{LOG_FEATURE_OVERRIDE,      "Override"},
+		{LOG_DETECTION_EDID_PARSER, "Edid"},
+		{LOG_DETECTION_DP_CAPS,     "DP_Caps"},
+		{LOG_RESOURCE,              "Resource"},
+		{LOG_DML,                   "DML"},
+		{LOG_EVENT_MODE_SET,        "Mode"},
+		{LOG_EVENT_DETECTION,       "Detect"},
+		{LOG_EVENT_LINK_TRAINING,   "LKTN"},
+		{LOG_EVENT_LINK_LOSS,       "LinkLoss"},
+		{LOG_EVENT_UNDERFLOW,       "Underflow"},
+		{LOG_IF_TRACE,              "InterfaceTrace"},
+		{LOG_DTN,                   "DTN"}
+};
+
+
+/* ----------- Object init and destruction ----------- */
+static bool construct(struct dc_context *ctx, struct dal_logger *logger,
+		      uint32_t log_mask)
+{
+	/* malloc buffer and init offsets */
+	logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
+	logger->log_buffer = (char *)kzalloc(logger->log_buffer_size * sizeof(char),
+					     GFP_KERNEL);
+
+	if (!logger->log_buffer)
+		return false;
+
+	/* Initialize both offsets to start of buffer (empty) */
+	logger->buffer_read_offset = 0;
+	logger->buffer_write_offset = 0;
+
+	logger->open_count = 0;
+
+	logger->flags.bits.ENABLE_CONSOLE = 1;
+	logger->flags.bits.ENABLE_BUFFER = 0;
+
+	logger->ctx = ctx;
+
+	logger->mask = log_mask;
+
+	return true;
+}
+
+static void destruct(struct dal_logger *logger)
+{
+	if (logger->log_buffer) {
+		kfree(logger->log_buffer);
+		logger->log_buffer = NULL;
+	}
+}
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
+{
+	/* malloc struct */
+	struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
+					    GFP_KERNEL);
+
+	if (!logger)
+		return NULL;
+	if (!construct(ctx, logger, log_mask)) {
+		kfree(logger);
+		return NULL;
+	}
+
+	return logger;
+}
+
+uint32_t dal_logger_destroy(struct dal_logger **logger)
+{
+	if (logger == NULL || *logger == NULL)
+		return 1;
+	destruct(*logger);
+	kfree(*logger);
+	*logger = NULL;
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+static bool dal_logger_should_log(
+	struct dal_logger *logger,
+	enum dc_log_type log_type)
+{
+	if (logger->mask & (1 << log_type))
+		return true;
+
+	return false;
+}
+
+static void log_to_debug_console(struct log_entry *entry)
+{
+	struct dal_logger *logger = entry->logger;
+
+	if (logger->flags.bits.ENABLE_CONSOLE == 0)
+		return;
+
+	if (entry->buf_offset) {
+		switch (entry->type) {
+		case LOG_ERROR:
+			dm_error("%s", entry->buf);
+			break;
+		default:
+			dm_output_to_console("%s", entry->buf);
+			break;
+		}
+	}
+}
+
/* Print everything unread existing in log_buffer to debug console*/
void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
{
	/* Entries are stored as NUL-terminated strings back to back; walk
	 * from the read offset and print each as its terminator is found. */
	char *string_start = &logger->log_buffer[logger->buffer_read_offset];

	if (should_warn)
		dm_output_to_console(
			"---------------- FLUSHING LOG BUFFER ----------------\n");
	while (logger->buffer_read_offset < logger->buffer_write_offset) {

		if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
			dm_output_to_console("%s", string_start);
			/* The next string begins right after this NUL. */
			string_start = logger->log_buffer + logger->buffer_read_offset + 1;
		}
		logger->buffer_read_offset++;
	}
	if (should_warn)
		dm_output_to_console(
			"-------------- END FLUSHING LOG BUFFER --------------\n\n");
}
+
/* Copy a finished entry into the logger's internal buffer.  When the
 * remaining space is insufficient, unread contents are first flushed to
 * the console and writing restarts at the beginning of the buffer.
 *
 * NOTE(review): buffer_space is a signed int compared against the
 * unsigned 'size' (the comparison promotes to unsigned); this is only
 * safe while buffer_write_offset never exceeds log_buffer_size — verify.
 */
static void log_to_internal_buffer(struct log_entry *entry)
{

	uint32_t size = entry->buf_offset;
	struct dal_logger *logger = entry->logger;

	if (logger->flags.bits.ENABLE_BUFFER == 0)
		return;

	if (logger->log_buffer == NULL)
		return;

	if (size > 0 && size < logger->log_buffer_size) {

		int buffer_space = logger->log_buffer_size -
				logger->buffer_write_offset;

		if (logger->buffer_write_offset == logger->buffer_read_offset) {
			/* Buffer is empty, start writing at beginning */
			buffer_space = logger->log_buffer_size;
			logger->buffer_write_offset = 0;
			logger->buffer_read_offset = 0;
		}

		if (buffer_space > size) {
			/* No wrap around, copy 'size' bytes
			 * from 'entry->buf' to 'log_buffer'
			 */
			memmove(logger->log_buffer +
					logger->buffer_write_offset,
					entry->buf, size);
			logger->buffer_write_offset += size;

		} else {
			/* Not enough room remaining, we should flush
			 * existing logs */

			/* Flush existing unread logs to console */
			dm_logger_flush_buffer(logger, true);

			/* Start writing to beginning of buffer */
			memmove(logger->log_buffer, entry->buf, size);
			logger->buffer_write_offset = size;
			logger->buffer_read_offset = 0;
		}

	}
}
+
+static void log_heading(struct log_entry *entry)
+{
+	int j;
+
+	for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
+		const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
+		if (info->type == entry->type)
+			dm_logger_append(entry, "[%s]\t", info->name);
+	}
+}
+
+static void append_entry(
+		struct log_entry *entry,
+		char *buffer,
+		uint32_t buf_size)
+{
+	if (!entry->buf ||
+		entry->buf_offset + buf_size > entry->max_buf_bytes
+	) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	/* Todo: check if off by 1 byte due to \0 anywhere */
+	memmove(entry->buf + entry->buf_offset, buffer, buf_size);
+	entry->buf_offset += buf_size;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Warning: Be careful that 'msg' is null terminated and the total size is
+ * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
+ */
+void dm_logger_write(
+	struct dal_logger *logger,
+	enum dc_log_type log_type,
+	const char *msg,
+	...)
+{
+	if (logger && dal_logger_should_log(logger, log_type)) {
+		uint32_t size;
+		va_list args;
+		char buffer[LOG_MAX_LINE_SIZE];
+		struct log_entry entry;
+
+		va_start(args, msg);
+
+		entry.logger = logger;
+
+		entry.buf = buffer;
+
+		entry.buf_offset = 0;
+		entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+		entry.type = log_type;
+
+		log_heading(&entry);
+
+		size = dm_log_to_buffer(
+			buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
+
+		buffer[entry.buf_offset + size] = '\0';
+		entry.buf_offset += size + 1;
+
+		/* --Flush log_entry buffer-- */
+		/* print to kernel console */
+		log_to_debug_console(&entry);
+		/* log internally for dsat */
+		log_to_internal_buffer(&entry);
+
+		va_end(args);
+	}
+}
+
+/* Same as dm_logger_write, except without open() and close(), which must
+ * be done separately.
+ */
+void dm_logger_append(
+	struct log_entry *entry,
+	const char *msg,
+	...)
+{
+	struct dal_logger *logger;
+
+	if (!entry) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	logger = entry->logger;
+
+	if (logger && logger->open_count > 0 &&
+		dal_logger_should_log(logger, entry->type)) {
+
+		uint32_t size;
+		va_list args;
+		char buffer[LOG_MAX_LINE_SIZE];
+
+		va_start(args, msg);
+
+		size = dm_log_to_buffer(
+			buffer, LOG_MAX_LINE_SIZE, msg, args);
+
+		if (size < LOG_MAX_LINE_SIZE - 1) {
+			append_entry(entry, buffer, size);
+		} else {
+			append_entry(entry, "LOG_ERROR, line too long\n", 27);
+		}
+
+		va_end(args);
+	}
+}
+
+void dm_logger_open(
+		struct dal_logger *logger,
+		struct log_entry *entry, /* out */
+		enum dc_log_type log_type)
+{
+	if (!entry) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	entry->type = log_type;
+	entry->logger = logger;
+
+	entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+			     GFP_KERNEL);
+
+	entry->buf_offset = 0;
+	entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+	logger->open_count++;
+
+	log_heading(entry);
+}
+
+void dm_logger_close(struct log_entry *entry)
+{
+	struct dal_logger *logger = entry->logger;
+
+	if (logger && logger->open_count > 0) {
+		logger->open_count--;
+	} else {
+		BREAK_TO_DEBUGGER();
+		goto cleanup;
+	}
+
+	/* --Flush log_entry buffer-- */
+	/* print to kernel console */
+	log_to_debug_console(entry);
+	/* log internally for dsat */
+	log_to_internal_buffer(entry);
+
+	/* TODO: Write end heading */
+
+cleanup:
+	if (entry->buf) {
+		kfree(entry->buf);
+		entry->buf = NULL;
+		entry->buf_offset = 0;
+		entry->max_buf_bytes = 0;
+	}
+}

+ 30 - 0
drivers/gpu/drm/amd/display/dc/basics/logger.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_H__
+#define __DAL_LOGGER_H__
+
+
+#endif /* __DAL_LOGGER_H__ */

+ 307 - 0
drivers/gpu/drm/amd/display/dc/basics/vector.c

@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/vector.h"
+
+bool dal_vector_construct(
+	struct vector *vector,
+	struct dc_context *ctx,
+	uint32_t capacity,
+	uint32_t struct_size)
+{
+	vector->container = NULL;
+
+	if (!struct_size || !capacity) {
+		/* Container must be non-zero size*/
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	vector->container = kzalloc(struct_size * capacity, GFP_KERNEL);
+	if (vector->container == NULL)
+		return false;
+	vector->capacity = capacity;
+	vector->struct_size = struct_size;
+	vector->count = 0;
+	vector->ctx = ctx;
+	return true;
+}
+
+bool dal_vector_presized_costruct(
+	struct vector *vector,
+	struct dc_context *ctx,
+	uint32_t count,
+	void *initial_value,
+	uint32_t struct_size)
+{
+	uint32_t i;
+
+	vector->container = NULL;
+
+	if (!struct_size || !count) {
+		/* Container must be non-zero size*/
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	vector->container = kzalloc(struct_size * count, GFP_KERNEL);
+
+	if (vector->container == NULL)
+		return false;
+
+	/* If caller didn't supply initial value then the default
+	 * of all zeros is expected, which is exactly what dal_alloc()
+	 * initialises the memory to. */
+	if (NULL != initial_value) {
+		for (i = 0; i < count; ++i)
+			memmove(
+				vector->container + i * struct_size,
+				initial_value,
+				struct_size);
+	}
+
+	vector->capacity = count;
+	vector->struct_size = struct_size;
+	vector->count = count;
+	return true;
+}
+
+struct vector *dal_vector_presized_create(
+	struct dc_context *ctx,
+	uint32_t size,
+	void *initial_value,
+	uint32_t struct_size)
+{
+	struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+	if (vector == NULL)
+		return NULL;
+
+	if (dal_vector_presized_costruct(
+		vector, ctx, size, initial_value, struct_size))
+		return vector;
+
+	BREAK_TO_DEBUGGER();
+	kfree(vector);
+	return NULL;
+}
+
+struct vector *dal_vector_create(
+	struct dc_context *ctx,
+	uint32_t capacity,
+	uint32_t struct_size)
+{
+	struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+	if (vector == NULL)
+		return NULL;
+
+	if (dal_vector_construct(vector, ctx, capacity, struct_size))
+		return vector;
+
+	BREAK_TO_DEBUGGER();
+	kfree(vector);
+	return NULL;
+}
+
+void dal_vector_destruct(
+	struct vector *vector)
+{
+	kfree(vector->container);
+	vector->count = 0;
+	vector->capacity = 0;
+}
+
+void dal_vector_destroy(
+	struct vector **vector)
+{
+	if (vector == NULL || *vector == NULL)
+		return;
+	dal_vector_destruct(*vector);
+	kfree(*vector);
+	*vector = NULL;
+}
+
/* Number of elements currently stored in the vector. */
uint32_t dal_vector_get_count(
	const struct vector *vector)
{
	return vector->count;
}
+
+void *dal_vector_at_index(
+	const struct vector *vector,
+	uint32_t index)
+{
+	if (vector->container == NULL || index >= vector->count)
+		return NULL;
+	return vector->container + (index * vector->struct_size);
+}
+
+bool dal_vector_remove_at_index(
+	struct vector *vector,
+	uint32_t index)
+{
+	if (index >= vector->count)
+		return false;
+
+	if (index != vector->count - 1)
+		memmove(
+			vector->container + (index * vector->struct_size),
+			vector->container + ((index + 1) * vector->struct_size),
+			(vector->count - index - 1) * vector->struct_size);
+	vector->count -= 1;
+
+	return true;
+}
+
+void dal_vector_set_at_index(
+	const struct vector *vector,
+	const void *what,
+	uint32_t index)
+{
+	void *where = dal_vector_at_index(vector, index);
+
+	if (!where) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+	memmove(
+		where,
+		what,
+		vector->struct_size);
+}
+
/* Growth policy when the vector is full: double the capacity.  (The
 * constructors reject zero capacity, so this always grows.) */
static inline uint32_t calc_increased_capacity(
	uint32_t old_capacity)
{
	return old_capacity * 2;
}
+
+bool dal_vector_insert_at(
+	struct vector *vector,
+	const void *what,
+	uint32_t position)
+{
+	uint8_t *insert_address;
+
+	if (vector->count == vector->capacity) {
+		if (!dal_vector_reserve(
+			vector,
+			calc_increased_capacity(vector->capacity)))
+			return false;
+	}
+
+	insert_address = vector->container + (vector->struct_size * position);
+
+	if (vector->count && position < vector->count)
+		memmove(
+			insert_address + vector->struct_size,
+			insert_address,
+			vector->struct_size * (vector->count - position));
+
+	memmove(
+		insert_address,
+		what,
+		vector->struct_size);
+
+	vector->count++;
+
+	return true;
+}
+
/* Append 'item' at the end of the vector, growing it as needed. */
bool dal_vector_append(
	struct vector *vector,
	const void *item)
{
	return dal_vector_insert_at(vector, item, vector->count);
}
+
/* Deep-copy 'vector'.  An empty vector is cloned with its original
 * capacity; a non-empty one is cloned "presized" to exactly its count
 * (so clone capacity == count), which is the amount the memmove below
 * copies.  Returns NULL on allocation failure. */
struct vector *dal_vector_clone(
	const struct vector *vector)
{
	struct vector *vec_cloned;
	uint32_t count;

	/* create new vector */
	count = dal_vector_get_count(vector);

	if (count == 0)
		/* when count is 0 we still want to create clone of the vector
		 */
		vec_cloned = dal_vector_create(
			vector->ctx,
			vector->capacity,
			vector->struct_size);
	else
		/* Call "presized create" version, independently of how the
		 * original vector was created.
		 * The owner of original vector must know how to treat the new
		 * vector - as "presized" or as "regular".
		 * But from vector point of view it doesn't matter. */
		vec_cloned = dal_vector_presized_create(vector->ctx, count,
			NULL,/* no initial value */
			vector->struct_size);

	if (NULL == vec_cloned) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	/* copy vector's data */
	memmove(vec_cloned->container, vector->container,
			vec_cloned->struct_size * vec_cloned->capacity);

	return vec_cloned;
}
+
/* Allocated capacity in elements (not the number currently in use). */
uint32_t dal_vector_capacity(const struct vector *vector)
{
	return vector->capacity;
}
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity)
+{
+	void *new_container;
+
+	if (capacity <= vector->capacity)
+		return true;
+
+	new_container = krealloc(vector->container,
+				 capacity * vector->struct_size, GFP_KERNEL);
+
+	if (new_container) {
+		vector->container = new_container;
+		vector->capacity = capacity;
+		return true;
+	}
+
+	return false;
+}
+
/* Drop all elements; storage and capacity are kept for reuse. */
void dal_vector_clear(struct vector *vector)
{
	vector->count = 0;
}

+ 27 - 0
drivers/gpu/drm/amd/display/dc/bios/Makefile

@@ -0,0 +1,27 @@
+#
+# Makefile for the 'bios' sub-component of DAL.
+# It provides the parsing and executing controls for atom bios image.
+
+BIOS = bios_parser.o bios_parser_interface.o  bios_parser_helper.o command_table.o command_table_helper.o bios_parser_common.o
+
+BIOS += command_table2.o command_table_helper2.o bios_parser2.o
+
+AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
+# DCE8.x is compiled.
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
+
+###############################################################################
+# DCE 11x
+###############################################################################
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper2_dce112.o

+ 3871 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c

@@ -0,0 +1,3871 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "dc_bios_types.h"
+#include "include/gpio_service_interface.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "bios_parser.h"
+#include "bios_parser_types_internal.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+/* TODO remove - only needed for default i2c speed */
+#include "dc.h"
+
+#define THREE_PERCENT_OF_10000 300
+
+#define LAST_RECORD_TYPE 0xff
+
+/* GUID to validate external display connection info table (aka OPM module) */
+static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
+	0x91, 0x6E, 0x57, 0x09,
+	0x3F, 0x6D, 0xD2, 0x11,
+	0x39, 0x8E, 0x00, 0xA0,
+	0xC9, 0x69, 0x72, 0x3B};
+
+#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
+
+static void get_atom_data_table_revision(
+	ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+	struct atom_data_revision *tbl_revision);
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+	ATOM_OBJECT *object);
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+	uint16_t **id_list);
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+	ATOM_OBJECT *object, uint16_t **id_list);
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+	struct graphics_object_id id);
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+	ATOM_I2C_RECORD *record,
+	struct graphics_object_i2c_info *info);
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+	ATOM_OBJECT *object);
+static struct device_id device_type_from_device_id(uint16_t device_id);
+static uint32_t signal_to_ss_id(enum as_signal_type signal);
+static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+/*****************************************************************************/
+static bool bios_parser_construct(
+	struct bios_parser *bp,
+	struct bp_init_data *init,
+	enum dce_version dce_version);
+
+static uint8_t bios_parser_get_connectors_number(
+	struct dc_bios *dcb);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+	struct dc_bios *dcb,
+	struct embedded_panel_info *info);
+
+/*****************************************************************************/
+
+/*
+ * Allocate and initialize a bios_parser for the given DCE version.
+ * Returns the embedded dc_bios interface on success; NULL on allocation
+ * failure or when bios_parser_construct() rejects the init data.
+ */
+struct dc_bios *bios_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	struct bios_parser *bp = NULL;
+
+	bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+	if (!bp)
+		return NULL;
+
+	if (bios_parser_construct(bp, init, dce_version))
+		/* Hand out the embedded base; BP_FROM_DCB() recovers bp later. */
+		return &bp->base;
+
+	kfree(bp);
+	BREAK_TO_DEBUGGER();
+	return NULL;
+}
+
+/* Release resources owned by the parser core (not the parser object itself). */
+static void destruct(struct bios_parser *bp)
+{
+	kfree(bp->base.bios_local_image);
+	kfree(bp->base.integrated_info);
+}
+
+/*
+ * Tear down a parser created by bios_parser_create() and NULL the caller's
+ * dc_bios pointer so it cannot be used after free.
+ */
+static void bios_parser_destroy(struct dc_bios **dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+	if (!bp) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	destruct(bp);
+
+	kfree(bp);
+	*dcb = NULL;
+}
+
+/*
+ * Read the object count of an ATOM object table located @offset bytes past
+ * the object-info table base. Returns 0 when the table image is unavailable.
+ */
+static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
+{
+	ATOM_OBJECT_TABLE *table;
+
+	uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
+
+	table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
+
+	if (!table)
+		return 0;
+	else
+		return table->ucNumberOfObjects;
+}
+
+/* Number of connector objects listed in the VBIOS connector object table. */
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	return get_number_of_objects(bp,
+		le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
+}
+
+/*
+ * Return the parsed graphics_object_id of the i-th encoder object.
+ * Falls back to an OBJECT_TYPE_UNKNOWN id when the table is missing or
+ * @i is out of range.
+ */
+static struct graphics_object_id bios_parser_get_encoder_id(
+	struct dc_bios *dcb,
+	uint32_t i)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct graphics_object_id object_id = dal_graphics_object_id_init(
+		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+	uint32_t encoder_table_offset = bp->object_info_tbl_offset
+		+ le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+
+	ATOM_OBJECT_TABLE *tbl =
+		GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+	if (tbl && tbl->ucNumberOfObjects > i) {
+		const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+		object_id = object_id_from_bios_object_id(id);
+	}
+
+	return object_id;
+}
+
+/*
+ * Return the parsed graphics_object_id of the i-th connector object.
+ * Falls back to an OBJECT_TYPE_UNKNOWN id when the table is missing or
+ * @i is out of range.
+ */
+static struct graphics_object_id bios_parser_get_connector_id(
+	struct dc_bios *dcb,
+	uint8_t i)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct graphics_object_id object_id = dal_graphics_object_id_init(
+		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+	uint32_t connector_table_offset = bp->object_info_tbl_offset
+		+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+	ATOM_OBJECT_TABLE *tbl =
+		GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
+
+	if (tbl && tbl->ucNumberOfObjects > i) {
+		const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+		object_id = object_id_from_bios_object_id(id);
+	}
+
+	return object_id;
+}
+
+/*
+ * Count the destination (downstream) objects attached to @id.
+ * NOTE(review): get_bios_object() may return NULL for an unknown id and
+ * the result is passed on unchecked — confirm get_dst_number_from_object()
+ * tolerates a NULL object.
+ */
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+	struct graphics_object_id id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object = get_bios_object(bp, id);
+
+	return get_dst_number_from_object(bp, object);
+}
+
+/*
+ * Look up the index-th source (upstream) object of @object_id and return its
+ * parsed id through @src_object_id. BP_RESULT_BADINPUT on a NULL output
+ * pointer, an unknown object id, or an out-of-range index.
+ */
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+	struct graphics_object_id object_id, uint32_t index,
+	struct graphics_object_id *src_object_id)
+{
+	uint32_t number;
+	uint16_t *id;
+	ATOM_OBJECT *object;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!src_object_id)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return BP_RESULT_BADINPUT;
+	}
+
+	number = get_src_obj_list(bp, object, &id);
+
+	if (number <= index)
+		return BP_RESULT_BADINPUT;
+
+	*src_object_id = object_id_from_bios_object_id(id[index]);
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Look up the index-th destination (downstream) object of @object_id and
+ * return its parsed id through @dest_object_id. BP_RESULT_BADINPUT on a NULL
+ * output pointer, an unknown object id, or an out-of-range index.
+ */
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+	struct graphics_object_id object_id, uint32_t index,
+	struct graphics_object_id *dest_object_id)
+{
+	uint32_t number;
+	uint16_t *id;
+	ATOM_OBJECT *object;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!dest_object_id)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	/* Fix: mirror bios_parser_get_src_obj() — get_bios_object() returns
+	 * NULL for an invalid id and get_dest_obj_list() would dereference
+	 * that NULL pointer. */
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return BP_RESULT_BADINPUT;
+	}
+
+	number = get_dest_obj_list(bp, object, &id);
+
+	if (number <= index)
+		return BP_RESULT_BADINPUT;
+
+	*dest_object_id = object_id_from_bios_object_id(id[index]);
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Walk the record list of the object identified by @id and translate the
+ * first valid ATOM I2C record into @info via get_gpio_i2c_info().
+ * The record list is terminated by LAST_RECORD_TYPE or a zero-size record.
+ */
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct graphics_object_i2c_info *info)
+{
+	uint32_t offset;
+	ATOM_OBJECT *object;
+	ATOM_COMMON_RECORD_HEADER *header;
+	ATOM_I2C_RECORD *record;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+			!header->ucRecordSize)
+			break;
+
+		if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+			&& sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
+			/* get the I2C info */
+			record = (ATOM_I2C_RECORD *) header;
+
+			if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
+				return BP_RESULT_OK;
+		}
+
+		offset += header->ucRecordSize;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+/*
+ * Scan a v1/v2 VoltageObjectInfo table (starting at @address, bounded by
+ * @header->usStructureSize) for an I2C-controlled voltage object and return
+ * its i2c line through @i2c_line.
+ */
+static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
+	ATOM_COMMON_TABLE_HEADER *header,
+	uint8_t *address)
+{
+	enum bp_result result = BP_RESULT_NORECORD;
+	ATOM_VOLTAGE_OBJECT_INFO *info =
+		(ATOM_VOLTAGE_OBJECT_INFO *) address;
+
+	uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
+
+	while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+		ATOM_VOLTAGE_OBJECT *object =
+			(ATOM_VOLTAGE_OBJECT *) voltage_current_object;
+
+		/* NOTE(review): ucVoltageType is tested both against
+		 * SET_VOLTAGE_INIT_MODE and the I2C-control mask — confirm
+		 * against the ATOM_VOLTAGE_OBJECT layout that this is not
+		 * meant to be a ucVoltageMode check. */
+		if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
+			(object->ucVoltageType &
+				VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
+
+			/* XOR 0x90 converts the table's encoding to the i2c
+			 * line id the GPIO service expects — TODO confirm. */
+			*i2c_line = object->asControl.ucVoltageControlI2cLine
+					^ 0x90;
+			result = BP_RESULT_OK;
+			break;
+		}
+
+		voltage_current_object += object->ucSize;
+	}
+	return result;
+}
+
+/*
+ * Scan a v3.1 VoltageObjectInfo table for an init-mode regulator entry whose
+ * voltage type equals @index and return its i2c line through @i2c_line.
+ */
+static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
+	uint32_t index,
+	ATOM_COMMON_TABLE_HEADER *header,
+	uint8_t *address)
+{
+	enum bp_result result = BP_RESULT_NORECORD;
+	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
+		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
+
+	uint8_t *voltage_current_object =
+		(uint8_t *) (&(info->asVoltageObj[0]));
+
+	while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+		ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
+			(ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
+
+		if (object->sHeader.ucVoltageMode ==
+			ATOM_INIT_VOLTAGE_REGULATOR) {
+			if (object->sHeader.ucVoltageType == index) {
+				/* Same 0x90 line-id translation as the v1
+				 * path — TODO confirm encoding. */
+				*i2c_line = object->ucVoltageControlI2cLine
+						^ 0x90;
+				result = BP_RESULT_OK;
+				break;
+			}
+		}
+
+		voltage_current_object += le16_to_cpu(object->sHeader.usSize);
+	}
+	return result;
+}
+
+/*
+ * Build a synthetic I2C record from a packed i2c channel id and resolve it
+ * through get_gpio_i2c_info().
+ * NOTE(review): &i2c_channel_id is reinterpreted through the
+ * ATOM_I2C_ID_CONFIG_ACCESS bitfield view — an aliasing cast that assumes
+ * the struct's first byte layout matches the low byte of the uint32_t.
+ */
+static enum bp_result bios_parser_get_thermal_ddc_info(
+	struct dc_bios *dcb,
+	uint32_t i2c_channel_id,
+	struct graphics_object_i2c_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_I2C_ID_CONFIG_ACCESS *config;
+	ATOM_I2C_RECORD record;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
+
+	record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
+	record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
+	record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
+
+	return get_gpio_i2c_info(bp, &record, info);
+}
+
+/*
+ * Resolve the DDC/I2C info of the voltage regulator identified by @index:
+ * pick the version-specific VoltageObjectInfo walker, then translate the
+ * resulting i2c line into full GPIO info.
+ * NOTE(review): voltage_info_address from bios_get_image() is used without
+ * a NULL check — confirm the table offset is always valid here.
+ */
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+	uint32_t index,
+	struct graphics_object_i2c_info *info)
+{
+	uint8_t i2c_line = 0;
+	enum bp_result result = BP_RESULT_NORECORD;
+	uint8_t *voltage_info_address;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision = {0};
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!DATA_TABLES(VoltageObjectInfo))
+		return result;
+
+	voltage_info_address = bios_get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER));
+
+	header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
+
+	get_atom_data_table_revision(header, &revision);
+
+	switch (revision.major) {
+	case 1:
+	case 2:
+		result = get_voltage_ddc_info_v1(&i2c_line, header,
+			voltage_info_address);
+		break;
+	case 3:
+		/* Only v3.1 layout is understood. */
+		if (revision.minor != 1)
+			break;
+		result = get_voltage_ddc_info_v3(&i2c_line, index, header,
+			voltage_info_address);
+		break;
+	}
+
+	if (result == BP_RESULT_OK)
+		result = bios_parser_get_thermal_ddc_info(dcb,
+			i2c_line, info);
+
+	return result;
+}
+
+/* TODO: temporary commented out to suppress 'defined but not used' warning */
+#if 0
+/*
+ * Dead code (compiled out): scan every connector object's record list for an
+ * I2C record whose line mux matches @i2c_line and return its GPIO info.
+ * Kept for reference until a caller materializes.
+ */
+static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
+	struct bios_parser *bp,
+	uint8_t i2c_line, struct graphics_object_i2c_info *info)
+{
+	uint32_t offset;
+	ATOM_OBJECT *object;
+	ATOM_OBJECT_TABLE *table;
+	uint32_t i;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+	offset += bp->object_info_tbl_offset;
+
+	table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+
+	if (!table)
+		return BP_RESULT_BADBIOSTABLE;
+
+	for (i = 0; i < table->ucNumberOfObjects; i++) {
+		object = &table->asObjects[i];
+
+		if (!object) {
+			BREAK_TO_DEBUGGER(); /* Invalid object id */
+			return BP_RESULT_BADINPUT;
+		}
+
+		offset = le16_to_cpu(object->usRecordOffset)
+				+ bp->object_info_tbl_offset;
+
+		for (;;) {
+			ATOM_COMMON_RECORD_HEADER *header =
+				GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+			if (!header)
+				return BP_RESULT_BADBIOSTABLE;
+
+			/* Offset is advanced before the type checks, so the
+			 * 'continue' paths below move to the next record. */
+			offset += header->ucRecordSize;
+
+			if (LAST_RECORD_TYPE == header->ucRecordType ||
+				!header->ucRecordSize)
+				break;
+
+			if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+				&& sizeof(ATOM_I2C_RECORD) <=
+				header->ucRecordSize) {
+				ATOM_I2C_RECORD *record =
+					(ATOM_I2C_RECORD *) header;
+
+				if (i2c_line != record->sucI2cId.bfI2C_LineMux)
+					continue;
+
+				/* get the I2C info */
+				if (get_gpio_i2c_info(bp, record, info) ==
+					BP_RESULT_OK)
+					return BP_RESULT_OK;
+			}
+		}
+	}
+
+	return BP_RESULT_NORECORD;
+}
+#endif
+
+/*
+ * Fetch the HPD (hot-plug detect) record of the object identified by @id and
+ * fill @info with the GPIO uid and active pin state.
+ */
+static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct graphics_object_hpd_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object;
+	ATOM_HPD_INT_RECORD *record = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	record = get_hpd_record(bp, object);
+
+	if (record != NULL) {
+		info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
+		info->hpd_active = record->ucPlugged_PinState;
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+/*
+ * Find the first device-tag record in @object's record list and return it
+ * through @record. The list ends at LAST_RECORD_TYPE or a zero-size record.
+ */
+static enum bp_result bios_parser_get_device_tag_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object,
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
+{
+	ATOM_COMMON_RECORD_HEADER *header;
+	uint32_t offset;
+
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		/* Advance before the type checks so 'continue' moves on. */
+		offset += header->ucRecordSize;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+			!header->ucRecordSize)
+			break;
+
+		if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
+			header->ucRecordType)
+			continue;
+
+		if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
+			continue;
+
+		*record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+/*
+ * Read the device_tag_index-th device tag of a connector object and fill
+ * @info with the ACPI device enum and the decoded device id.
+ */
+static enum bp_result bios_parser_get_device_tag(
+	struct dc_bios *dcb,
+	struct graphics_object_id connector_object_id,
+	uint32_t device_tag_index,
+	struct connector_device_tag_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object;
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
+	ATOM_CONNECTOR_DEVICE_TAG *device_tag;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	/* getBiosObject will return MXM object */
+	object = get_bios_object(bp, connector_object_id);
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return BP_RESULT_BADINPUT;
+	}
+
+	if (bios_parser_get_device_tag_record(bp, object, &record)
+		!= BP_RESULT_OK)
+		return BP_RESULT_NORECORD;
+
+	if (device_tag_index >= record->ucNumberOfDevice)
+		return BP_RESULT_NORECORD;
+
+	device_tag = &record->asDeviceTag[device_tag_index];
+
+	info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
+	info->dev_id =
+		device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_1(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_2(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info);
+
+/*
+ * Dispatch to the version-specific FirmwareInfo parser (v1.4, v2.1, v2.2)
+ * based on the table header revision. BP_RESULT_BADBIOSTABLE is returned
+ * for missing tables and unrecognized revisions.
+ */
+static enum bp_result bios_parser_get_firmware_info(
+	struct dc_bios *dcb,
+	struct dc_firmware_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision;
+
+	if (info && DATA_TABLES(FirmwareInfo)) {
+		header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+			DATA_TABLES(FirmwareInfo));
+		get_atom_data_table_revision(header, &revision);
+		switch (revision.major) {
+		case 1:
+			switch (revision.minor) {
+			case 4:
+				result = get_firmware_info_v1_4(bp, info);
+				break;
+			default:
+				break;
+			}
+			break;
+
+		case 2:
+			switch (revision.minor) {
+			case 1:
+				result = get_firmware_info_v2_1(bp, info);
+				break;
+			case 2:
+				result = get_firmware_info_v2_2(bp, info);
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return result;
+}
+
+/*
+ * Parse an ATOM_FIRMWARE_INFO_V1_4 table into @info. All clock values are
+ * converted from the table's 10 kHz units to kHz. When the table flags SS
+ * support without details, a conservative 3% (in 0.01% units) is reported
+ * for bandwidth calculation.
+ */
+static enum bp_result get_firmware_info_v1_4(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info)
+{
+	ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
+		GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
+			DATA_TABLES(FirmwareInfo));
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!firmware_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. We need to convert from 10KHz units into
+	 * KHz units */
+	info->pll_info.crystal_frequency =
+		le16_to_cpu(firmware_info->usReferenceClock) * 10;
+	info->pll_info.min_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+	info->pll_info.max_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+	info->pll_info.min_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+	info->pll_info.max_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+
+	if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+		/* Since there is no information on the SS, report conservative
+		 * value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+	if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information on the SS,report conservative
+		 * value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info);
+
+/*
+ * Parse an ATOM_FIRMWARE_INFO_V2_1 table into @info (clocks converted from
+ * 10 kHz to kHz). Memory/engine spread-spectrum percentages come from the
+ * internal SS table when available; otherwise a conservative 3% (0.01%
+ * units) is assumed when the firmware flags external SS support.
+ */
+static enum bp_result get_firmware_info_v2_1(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info)
+{
+	ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
+		GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
+	struct spread_spectrum_info internalSS;
+	uint32_t index;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!firmwareInfo)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. We need to convert from 10KHz units into
+	 * KHz units */
+	info->pll_info.crystal_frequency =
+		le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
+	info->pll_info.min_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
+	info->pll_info.max_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
+	info->pll_info.min_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
+	info->pll_info.max_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
+	info->default_display_engine_pll_frequency =
+		le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
+	info->external_clock_source_frequency_for_dp =
+		le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
+	info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+
+	/* There should be only one entry in the SS info table for Memory Clock
+	 */
+	index = 0;
+	if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+		/* Since there is no information for external SS, report
+		 *  conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+		ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
+		if (internalSS.spread_spectrum_percentage) {
+			info->feature.memory_clk_ss_percentage =
+				internalSS.spread_spectrum_percentage;
+			if (internalSS.type.CENTER_MODE) {
+				/* if it is centermode, the exact SS Percentage
+				 * will be round up of half of the percentage
+				 * reported in the SS table */
+				++info->feature.memory_clk_ss_percentage;
+				info->feature.memory_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	/* There should be only one entry in the SS info table for Engine Clock
+	 */
+	index = 1;
+	if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information for external SS, report
+		 * conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+		ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
+		if (internalSS.spread_spectrum_percentage) {
+			info->feature.engine_clk_ss_percentage =
+				internalSS.spread_spectrum_percentage;
+			if (internalSS.type.CENTER_MODE) {
+				/* if it is centermode, the exact SS Percentage
+				 * will be round up of half of the percentage
+				 * reported in the SS table */
+				++info->feature.engine_clk_ss_percentage;
+				info->feature.engine_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Parse an ATOM_FIRMWARE_INFO_V2_2 table into @info. Same SS handling as
+ * the v2.1 path, plus the remote display config, minimum backlight level,
+ * and the SMU GPU PLL output frequency (used starting from CI).
+ */
+static enum bp_result get_firmware_info_v2_2(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info)
+{
+	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+	struct spread_spectrum_info internal_ss;
+	uint32_t index;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
+		DATA_TABLES(FirmwareInfo));
+
+	if (!firmware_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. We need to convert from 10KHz units into
+	 * KHz units */
+	info->pll_info.crystal_frequency =
+		le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
+	info->pll_info.min_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+	info->pll_info.max_input_pxl_clk_pll_frequency =
+		le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+	info->pll_info.min_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+	info->pll_info.max_output_pxl_clk_pll_frequency =
+		le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+	info->default_display_engine_pll_frequency =
+		le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
+	info->external_clock_source_frequency_for_dp =
+		le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;
+
+	/* There should be only one entry in the SS info table for Memory Clock
+	 */
+	index = 0;
+	if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+		/* Since there is no information for external SS, report
+		 *  conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+			ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
+		if (internal_ss.spread_spectrum_percentage) {
+			info->feature.memory_clk_ss_percentage =
+					internal_ss.spread_spectrum_percentage;
+			if (internal_ss.type.CENTER_MODE) {
+				/* if it is centermode, the exact SS Percentage
+				 * will be round up of half of the percentage
+				 * reported in the SS table */
+				++info->feature.memory_clk_ss_percentage;
+				info->feature.memory_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	/* There should be only one entry in the SS info table for Engine Clock
+	 */
+	index = 1;
+	if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information for external SS, report
+		 * conservative value 3% for bandwidth calculation */
+		/* unit of 0.01% */
+		info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+	else if (get_ss_info_v3_1(bp,
+			ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
+		if (internal_ss.spread_spectrum_percentage) {
+			info->feature.engine_clk_ss_percentage =
+					internal_ss.spread_spectrum_percentage;
+			if (internal_ss.type.CENTER_MODE) {
+				/* if it is centermode, the exact SS Percentage
+				 * will be round up of half of the percentage
+				 * reported in the SS table */
+				++info->feature.engine_clk_ss_percentage;
+				info->feature.engine_clk_ss_percentage /= 2;
+			}
+		}
+	}
+
+	/* Remote Display */
+	info->remote_display_config = firmware_info->ucRemoteDisplayConfig;
+
+	/* Is allowed minimum BL level */
+	info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
+	/* Used starting from CI */
+	info->smu_gpu_pll_output_freq =
+			(uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Find the @index-th entry whose clock indication matches @id in the
+ * ASIC_InternalSS_Info v3 table and translate it into @ss_info (target
+ * clock range converted from 10 kHz to kHz, spread rate from 10 Hz units).
+ * NOTE(review): ss_table_header_include returned by GET_IMAGE() is
+ * dereferenced without a NULL check — confirm the offset is always valid.
+ */
+static enum bp_result get_ss_info_v3_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
+	ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+	uint32_t table_size;
+	uint32_t i;
+	uint32_t table_index = 0;
+
+	if (!ss_info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return BP_RESULT_UNSUPPORTED;
+
+	ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+		DATA_TABLES(ASIC_InternalSS_Info));
+	table_size =
+		(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+				- sizeof(ATOM_COMMON_TABLE_HEADER))
+				/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+				&ss_table_header_include->asSpreadSpectrum[0];
+
+	memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+	for (i = 0; i < table_size; i++) {
+		if (tbl[i].ucClockIndication != (uint8_t) id)
+			continue;
+
+		/* Skip matching entries until the requested index is reached. */
+		if (table_index != index) {
+			table_index++;
+			continue;
+		}
+		/* VBIOS introduced new defines for Version 3, same values as
+		 *  before, so now use these new ones for Version 3.
+		 * Shouldn't affect field VBIOS's V3 as define values are still
+		 *  same.
+		 * #define SS_MODE_V3_CENTRE_SPREAD_MASK                0x01
+		 * #define SS_MODE_V3_EXTERNAL_SS_MASK                  0x02
+
+		 * Old VBIOS defines:
+		 * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+		 * #define ATOM_EXTERNAL_SS_MASK                  0x00000002
+		 */
+
+		if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
+			ss_info->type.EXTERNAL = true;
+
+		if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
+			ss_info->type.CENTER_MODE = true;
+
+		/* Older VBIOS (in field) always provides SS percentage in 0.01%
+		 * units set Divider to 100 */
+		ss_info->spread_percentage_divider = 100;
+
+		/* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
+		if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
+				& tbl[i].ucSpreadSpectrumMode)
+			ss_info->spread_percentage_divider = 1000;
+
+		ss_info->type.STEP_AND_DELAY_INFO = false;
+		/* convert [10KHz] into [KHz] */
+		ss_info->target_clock_range =
+				le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+		ss_info->spread_spectrum_percentage =
+				(uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+		ss_info->spread_spectrum_range =
+				(uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+
+		return BP_RESULT_OK;
+	}
+	return BP_RESULT_NORECORD;
+}
+
+/* Thin dispatch to the per-ASIC transmitter_control command-table entry;
+ * BP_RESULT_FAILURE when this ASIC's table does not implement it. */
+static enum bp_result bios_parser_transmitter_control(
+	struct dc_bios *dcb,
+	struct bp_transmitter_control *cntl)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.transmitter_control)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+/* Thin dispatch to the per-ASIC dig_encoder_control command-table entry. */
+static enum bp_result bios_parser_encoder_control(
+	struct dc_bios *dcb,
+	struct bp_encoder_control *cntl)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.dig_encoder_control)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+/* Thin dispatch to the per-ASIC adjust_display_pll command-table entry. */
+static enum bp_result bios_parser_adjust_pixel_clock(
+	struct dc_bios *dcb,
+	struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.adjust_display_pll)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
+}
+
+/* Thin dispatch to the per-ASIC set_pixel_clock command-table entry. */
+static enum bp_result bios_parser_set_pixel_clock(
+	struct dc_bios *dcb,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_pixel_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+/* Thin dispatch to the per-ASIC set_dce_clock command-table entry. */
+static enum bp_result bios_parser_set_dce_clock(
+	struct dc_bios *dcb,
+	struct bp_set_dce_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_dce_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+/* Thin dispatch to enable/disable spread spectrum on a PPLL via the
+ * per-ASIC command table. */
+static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
+	struct dc_bios *dcb,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
+			bp, bp_params, enable);
+
+}
+
+/* Thin dispatch to the per-ASIC set_crtc_timing command-table entry. */
+static enum bp_result bios_parser_program_crtc_timing(
+	struct dc_bios *dcb,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_crtc_timing)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+/* Thin dispatch to the per-ASIC program_clock command-table entry. */
+static enum bp_result bios_parser_program_display_engine_pll(
+	struct dc_bios *dcb,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.program_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.program_clock(bp, bp_params);
+
+}
+
+
+/* Thin dispatch to the per-ASIC enable_crtc command-table entry. */
+static enum bp_result bios_parser_enable_crtc(
+	struct dc_bios *dcb,
+	enum controller_id id,
+	bool enable)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_crtc)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+/* Thin dispatch to the per-ASIC select_crtc_source command-table entry. */
+static enum bp_result bios_parser_crtc_source_select(
+	struct dc_bios *dcb,
+	struct bp_crtc_source_select *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.select_crtc_source)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+/* Thin dispatch to the per-ASIC enable_disp_power_gating entry. */
+static enum bp_result bios_parser_enable_disp_power_gating(
+	struct dc_bios *dcb,
+	enum controller_id controller_id,
+	enum bp_pipe_control_action action)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_disp_power_gating)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+		action);
+}
+
+/*
+ * Report whether the given device id is set in the usDeviceSupport
+ * bitmask of the object info table (v1.1 layout).
+ */
+static bool bios_parser_is_device_id_supported(
+	struct dc_bios *dcb,
+	struct device_id id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	uint32_t mask = get_support_mask_for_device_id(id);
+
+	/* NOTE(review): object_info_tbl.v1_1 is dereferenced without a NULL
+	 * check - presumably validated at construction; confirm. */
+	return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
+}
+
+/*
+ * Enable or disable a CRT (DAC) output.
+ *
+ * Ordering matters: on enable the encoder is programmed before the
+ * output control is asserted; on disable the output is dropped first
+ * and the encoder disabled afterwards.  Only DACA/DACB engines are
+ * supported; the corresponding encoder-control command table entry
+ * must exist, while the output-control entry is optional.
+ */
+static enum bp_result bios_parser_crt_control(
+	struct dc_bios *dcb,
+	enum engine_id engine_id,
+	bool enable,
+	uint32_t pixel_clock)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	uint8_t standard;
+
+	if (!bp->cmd_tbl.dac1_encoder_control &&
+		engine_id == ENGINE_ID_DACA)
+		return BP_RESULT_FAILURE;
+	if (!bp->cmd_tbl.dac2_encoder_control &&
+		engine_id == ENGINE_ID_DACB)
+		return BP_RESULT_FAILURE;
+	/* validate params */
+	switch (engine_id) {
+	case ENGINE_ID_DACA:
+	case ENGINE_ID_DACB:
+		break;
+	default:
+		/* unsupported engine */
+		return BP_RESULT_FAILURE;
+	}
+
+	standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
+
+	if (enable) {
+		/* program encoder first, then turn the output on */
+		if (engine_id == ENGINE_ID_DACA) {
+			bp->cmd_tbl.dac1_encoder_control(bp, enable,
+				pixel_clock, standard);
+			if (bp->cmd_tbl.dac1_output_control != NULL)
+				bp->cmd_tbl.dac1_output_control(bp, enable);
+		} else {
+			bp->cmd_tbl.dac2_encoder_control(bp, enable,
+				pixel_clock, standard);
+			if (bp->cmd_tbl.dac2_output_control != NULL)
+				bp->cmd_tbl.dac2_output_control(bp, enable);
+		}
+	} else {
+		/* turn the output off first, then disable the encoder */
+		if (engine_id == ENGINE_ID_DACA) {
+			if (bp->cmd_tbl.dac1_output_control != NULL)
+				bp->cmd_tbl.dac1_output_control(bp, enable);
+			bp->cmd_tbl.dac1_encoder_control(bp, enable,
+				pixel_clock, standard);
+		} else {
+			if (bp->cmd_tbl.dac2_output_control != NULL)
+				bp->cmd_tbl.dac2_output_control(bp, enable);
+			bp->cmd_tbl.dac2_encoder_control(bp, enable,
+				pixel_clock, standard);
+		}
+	}
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Walk the record list attached to @object and return the first
+ * ATOM_HPD_INT_RECORD whose declared size is large enough, or NULL if
+ * no such record exists (list is terminated by LAST_RECORD_TYPE or a
+ * zero-sized record).
+ */
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	ATOM_COMMON_RECORD_HEADER *header;
+	uint32_t offset;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object */
+		return NULL;
+	}
+
+	/* record list lives at usRecordOffset relative to the object table */
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+			!header->ucRecordSize)
+			break;
+
+		if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
+			&& sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
+			return (ATOM_HPD_INT_RECORD *) header;
+
+		offset += header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+/**
+ * Get I2C information of the input object
+ *
+ * Walks the record list attached to @object and returns the first
+ * ATOM_I2C_RECORD_TYPE record whose declared size is large enough,
+ * or NULL if none is found (list is terminated by LAST_RECORD_TYPE
+ * or a zero-sized record).
+ */
+static ATOM_I2C_RECORD *get_i2c_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	ATOM_COMMON_RECORD_HEADER *record_header;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER();
+		/* Invalid object */
+		return NULL;
+	}
+
+	/* record list lives at usRecordOffset relative to the object table */
+	offset = le16_to_cpu(object->usRecordOffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!record_header)
+			return NULL;
+
+		if (LAST_RECORD_TYPE == record_header->ucRecordType ||
+			0 == record_header->ucRecordSize)
+			break;
+
+		if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
+			sizeof(ATOM_I2C_RECORD) <=
+			record_header->ucRecordSize) {
+			return (ATOM_I2C_RECORD *)record_header;
+		}
+
+		offset += record_header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+static enum bp_result get_ss_info_from_ss_info_table(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info);
+static enum bp_result get_ss_info_from_tbl(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info);
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with the SS_Info table. Except for
+ * ASIC_InternalSS_Info ver 3.1, there is only one entry for each
+ * signal/ss id; there is no plan to support multiple spread spectrum
+ * entries for EverGreen.
+ * @param [in] dcb, DC BIOS object
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+	struct dc_bios *dcb,
+	enum as_signal_type signal,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	uint32_t clk_id_ss = 0;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision tbl_revision;
+
+	if (!ss_info) /* check for bad input */
+		return BP_RESULT_BADINPUT;
+	/* signal translation */
+	clk_id_ss = signal_to_ss_id(signal);
+
+	/* NOTE(review): if ASIC_InternalSS_Info is absent and index != 0,
+	 * we fall through and call GET_IMAGE() with a zero table offset -
+	 * confirm the helper tolerates that. */
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		if (!index)
+			return get_ss_info_from_ss_info_table(bp, clk_id_ss,
+				ss_info);
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+		DATA_TABLES(ASIC_InternalSS_Info));
+	get_atom_data_table_revision(header, &tbl_revision);
+
+	switch (tbl_revision.major) {
+	case 2:
+		switch (tbl_revision.minor) {
+		case 1:
+			/* there cannot be more than one entry for Internal
+			 * SS Info table version 2.1 */
+			if (!index)
+				return get_ss_info_from_tbl(bp, clk_id_ss,
+						ss_info);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case 3:
+		switch (tbl_revision.minor) {
+		case 1:
+			return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	/* there cannot be more than one entry for SS Info table */
+	return result;
+}
+
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *info);
+
+/**
+ * get_ss_info_from_tbl
+ * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS.
+ * There cannot be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param bp, bios parser handle
+ * @param id, spread spectrum info index
+ * @param ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_tbl(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info)
+{
+	if (!ss_info) /* check for bad input, if ss_info is not NULL */
+		return BP_RESULT_BADINPUT;
+	/* the SS_Info table only supports DP and LVDS */
+	if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+		return get_ss_info_from_ss_info_table(bp, id, ss_info);
+	else
+		return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
+			ss_info);
+}
+
+/**
+ * get_ss_info_from_internal_ss_info_tbl_V2_1
+ * Get spread spectrum information from the ASIC_InternalSS_Info table Ver 2.1
+ * from the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @param info, spread spectrum information structure (zeroed on entry)
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *info)
+{
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
+	ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+	uint32_t tbl_size, i;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return result;
+
+	/* NOTE(review): header is not NULL-checked before use - confirm
+	 * GET_IMAGE cannot fail for a non-zero table offset. */
+	header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+		DATA_TABLES(ASIC_InternalSS_Info));
+
+	memset(info, 0, sizeof(struct spread_spectrum_info));
+
+	/* entry count = payload size / entry size */
+	tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+					/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+					&(header->asSpreadSpectrum[0]);
+	for (i = 0; i < tbl_size; i++) {
+		result = BP_RESULT_NORECORD;
+
+		if (tbl[i].ucClockIndication != (uint8_t)id)
+			continue;
+
+		if (ATOM_EXTERNAL_SS_MASK
+			& tbl[i].ucSpreadSpectrumMode) {
+			info->type.EXTERNAL = true;
+		}
+		if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
+			& tbl[i].ucSpreadSpectrumMode) {
+			info->type.CENTER_MODE = true;
+		}
+		info->type.STEP_AND_DELAY_INFO = false;
+		/* convert [10KHz] into [KHz] */
+		info->target_clock_range =
+			le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+		info->spread_spectrum_percentage =
+			(uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+		/* convert [10Hz] into [Hz] */
+		info->spread_spectrum_range =
+			(uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+		result = BP_RESULT_OK;
+		break;
+	}
+
+	return result;
+
+}
+
+/**
+ * get_ss_info_from_ss_info_table
+ * Get spread spectrum information from the SS_Info table of the VBIOS.
+ * The internal SS id is first translated to the SS_Info id space (only
+ * DP and LVDS are representable there); for the SS_Info table there
+ * should not be more than one matching entry.
+ *
+ * @param [in] id, spread spectrum id (ASIC_INTERNAL_SS_* space)
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_ss_info_table(
+	struct bios_parser *bp,
+	uint32_t id,
+	struct spread_spectrum_info *ss_info)
+{
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	ATOM_SPREAD_SPECTRUM_INFO *tbl;
+	ATOM_COMMON_TABLE_HEADER *header;
+	uint32_t table_size;
+	uint32_t i;
+	uint32_t id_local = SS_ID_UNKNOWN;
+	struct atom_data_revision revision;
+
+	/* exist of the SS_Info table */
+	/* check for bad input, pSSinfo can not be NULL */
+	if (!DATA_TABLES(SS_Info) || !ss_info)
+		return result;
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info));
+	get_atom_data_table_revision(header, &revision);
+
+	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+
+	/* only revision 1.2+ is supported */
+	if (1 != revision.major || 2 > revision.minor)
+		return result;
+
+	/* have to convert from Internal_SS format to SS_Info format */
+	switch (id) {
+	case ASIC_INTERNAL_SS_ON_DP:
+		id_local = SS_ID_DP1;
+		break;
+	case ASIC_INTERNAL_SS_ON_LVDS:
+	{
+		/* for LVDS the SS id comes from the embedded panel info */
+		struct embedded_panel_info panel_info;
+
+		if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+				== BP_RESULT_OK)
+			id_local = panel_info.ss_id;
+		break;
+	}
+	default:
+		break;
+	}
+
+	if (id_local == SS_ID_UNKNOWN)
+		return result;
+
+	table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+					sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+	for (i = 0; i < table_size; i++) {
+		if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
+			continue;
+
+		memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+		if (ATOM_EXTERNAL_SS_MASK &
+				tbl->asSS_Info[i].ucSpreadSpectrumType)
+			ss_info->type.EXTERNAL = true;
+
+		if (ATOM_SS_CENTRE_SPREAD_MODE_MASK &
+				tbl->asSS_Info[i].ucSpreadSpectrumType)
+			ss_info->type.CENTER_MODE = true;
+
+		ss_info->type.STEP_AND_DELAY_INFO = true;
+		ss_info->spread_spectrum_percentage =
+			(uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
+		ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
+		ss_info->step_and_delay_info.delay =
+			tbl->asSS_Info[i].ucSS_Delay;
+		ss_info->step_and_delay_info.recommended_ref_div =
+			tbl->asSS_Info[i].ucRecommendedRef_Div;
+		ss_info->spread_spectrum_range =
+			(uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
+
+		/* there will be only one entry for each display type in the
+		 * SS_Info table */
+		result = BP_RESULT_OK;
+		break;
+	}
+
+	return result;
+}
+static enum bp_result get_embedded_panel_info_v1_2(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info);
+static enum bp_result get_embedded_panel_info_v1_3(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info);
+
+/*
+ * Read embedded (LCD/LVDS/eDP) panel info from the LCD_Info data
+ * table, dispatching on its format/content revision to the matching
+ * v1.2 or v1.3 parser.
+ */
+static enum bp_result bios_parser_get_embedded_panel_info(
+	struct dc_bios *dcb,
+	struct embedded_panel_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_COMMON_TABLE_HEADER *hdr;
+
+	if (!DATA_TABLES(LCD_Info))
+		return BP_RESULT_FAILURE;
+
+	hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info));
+
+	if (!hdr)
+		return BP_RESULT_BADBIOSTABLE;
+
+	switch (hdr->ucTableFormatRevision) {
+	case 1:
+		switch (hdr->ucTableContentRevision) {
+		case 0:
+		case 1:
+		case 2:
+			return get_embedded_panel_info_v1_2(bp, info);
+		case 3:
+			return get_embedded_panel_info_v1_3(bp, info);
+		default:
+			break;
+		}
+		/* fallthrough - unknown content revision */
+	default:
+		break;
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
+/*
+ * Parse the v1.2 LVDS_Info table into an embedded_panel_info:
+ * LCD timing (clocks converted from 10 kHz to kHz), sync polarity and
+ * misc flags, supported DRR refresh rates, and panel-misc capabilities.
+ */
+static enum bp_result get_embedded_panel_info_v1_2(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info)
+{
+	ATOM_LVDS_INFO_V12 *lvds;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(LVDS_Info))
+		return BP_RESULT_UNSUPPORTED;
+
+	lvds =
+		GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info));
+
+	if (!lvds)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* only format 1, content revision >= 2 carries this layout */
+	if (1 != lvds->sHeader.ucTableFormatRevision
+		|| 2 > lvds->sHeader.ucTableContentRevision)
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(info, 0, sizeof(struct embedded_panel_info));
+
+	/* We need to convert from 10KHz units into KHz units*/
+	info->lcd_timing.pixel_clk =
+		le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+	/* usHActive does not include borders, according to VBIOS team*/
+	info->lcd_timing.horizontal_addressable =
+		le16_to_cpu(lvds->sLCDTiming.usHActive);
+	/* usHBlanking_Time includes borders, so we should really be subtracting
+	 * borders during this translation, but LVDS generally*/
+	/* doesn't have borders, so we should be okay leaving this as is for
+	 * now.  May need to revisit if we ever have LVDS with borders*/
+	info->lcd_timing.horizontal_blanking_time =
+			le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+	/* usVActive does not include borders, according to VBIOS team*/
+	info->lcd_timing.vertical_addressable =
+			le16_to_cpu(lvds->sLCDTiming.usVActive);
+	/* usVBlanking_Time includes borders, so we should really be subtracting
+	 * borders during this translation, but LVDS generally*/
+	/* doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders*/
+	info->lcd_timing.vertical_blanking_time =
+		le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+	info->lcd_timing.horizontal_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+	info->lcd_timing.horizontal_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+	info->lcd_timing.vertical_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+	info->lcd_timing.vertical_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+	info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+	info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+	/* polarities are stored inverted in the table, hence the negation */
+	info->lcd_timing.misc_info.H_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+	info->lcd_timing.misc_info.V_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+	info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+	info->lcd_timing.misc_info.COMPOSITE_SYNC =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+	info->lcd_timing.misc_info.INTERLACE =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+	info->lcd_timing.misc_info.DOUBLE_CLOCK =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+	info->ss_id = lvds->ucSS_Id;
+
+	{
+		/* NOTE(review): uint8_t truncates the 16-bit
+		 * usSupportedRefreshRate - confirm only the low byte carries
+		 * refresh-rate flags. */
+		uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
+		/* Get minimum supported refresh rate*/
+		if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+			info->supported_rr.REFRESH_RATE_30HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+			info->supported_rr.REFRESH_RATE_40HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+			info->supported_rr.REFRESH_RATE_48HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+			info->supported_rr.REFRESH_RATE_50HZ = 1;
+		else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+			info->supported_rr.REFRESH_RATE_60HZ = 1;
+	}
+
+	/*Drr panel support can be reported by VBIOS*/
+	if (LCDPANEL_CAP_DRR_SUPPORTED
+			& lvds->ucLCDPanel_SpecialHandlingCap)
+		info->drr_enabled = 1;
+
+	if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+	if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.RGB888 = true;
+
+	info->lcd_timing.misc_info.GREY_LEVEL =
+		(uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
+			lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;
+
+	if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.SPATIAL = true;
+
+	if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.TEMPORAL = true;
+
+	if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
+		info->lcd_timing.misc_info.API_ENABLED = true;
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Parse the v1.3 LCD_Info table into an embedded_panel_info.
+ * Same fields as the v1.2 parser, but with the v1.3 refresh-rate
+ * layout: when DRR is supported, the minimum refresh rate is taken
+ * from ucMinRefreshRateForDRR when non-zero, otherwise from the
+ * supported-refresh-rate bitmask.
+ */
+static enum bp_result get_embedded_panel_info_v1_3(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info)
+{
+	ATOM_LCD_INFO_V13 *lvds;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(LCD_Info))
+		return BP_RESULT_UNSUPPORTED;
+
+	lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));
+
+	if (!lvds)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* only format 1, content revision >= 3 carries this layout */
+	if (!((1 == lvds->sHeader.ucTableFormatRevision)
+			&& (3 <= lvds->sHeader.ucTableContentRevision)))
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(info, 0, sizeof(struct embedded_panel_info));
+
+	/* We need to convert from 10KHz units into KHz units */
+	info->lcd_timing.pixel_clk =
+			le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+	/* usHActive does not include borders, according to VBIOS team */
+	info->lcd_timing.horizontal_addressable =
+			le16_to_cpu(lvds->sLCDTiming.usHActive);
+	/* usHBlanking_Time includes borders, so we should really be subtracting
+	 * borders during this translation, but LVDS generally*/
+	/* doesn't have borders, so we should be okay leaving this as is for
+	 * now.  May need to revisit if we ever have LVDS with borders*/
+	info->lcd_timing.horizontal_blanking_time =
+		le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+	/* usVActive does not include borders, according to VBIOS team*/
+	info->lcd_timing.vertical_addressable =
+		le16_to_cpu(lvds->sLCDTiming.usVActive);
+	/* usVBlanking_Time includes borders, so we should really be subtracting
+	 * borders during this translation, but LVDS generally*/
+	/* doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders*/
+	info->lcd_timing.vertical_blanking_time =
+		le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+	info->lcd_timing.horizontal_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+	info->lcd_timing.horizontal_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+	info->lcd_timing.vertical_sync_offset =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+	info->lcd_timing.vertical_sync_width =
+		le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+	info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+	info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+	/* polarities are stored inverted in the table, hence the negation */
+	info->lcd_timing.misc_info.H_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+	info->lcd_timing.misc_info.V_SYNC_POLARITY =
+		~(uint32_t)
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+	info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+	info->lcd_timing.misc_info.COMPOSITE_SYNC =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+	info->lcd_timing.misc_info.INTERLACE =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+	info->lcd_timing.misc_info.DOUBLE_CLOCK =
+		lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+	info->ss_id = lvds->ucSS_Id;
+
+	/* Drr panel support can be reported by VBIOS*/
+	if (LCDPANEL_CAP_V13_DRR_SUPPORTED
+			& lvds->ucLCDPanel_SpecialHandlingCap)
+		info->drr_enabled = 1;
+
+	/* Get supported refresh rate*/
+	if (info->drr_enabled == 1) {
+		uint8_t min_rr =
+				lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
+		uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
+
+		if (min_rr != 0) {
+			if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_30HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_40HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_48HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_50HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
+				info->supported_rr.REFRESH_RATE_60HZ = 1;
+		} else {
+			if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+				info->supported_rr.REFRESH_RATE_30HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+				info->supported_rr.REFRESH_RATE_40HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+				info->supported_rr.REFRESH_RATE_48HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+				info->supported_rr.REFRESH_RATE_50HZ = 1;
+			else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+				info->supported_rr.REFRESH_RATE_60HZ = 1;
+		}
+	}
+
+	if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
+		info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+	if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
+		info->lcd_timing.misc_info.RGB888 = true;
+
+	info->lcd_timing.misc_info.GREY_LEVEL =
+			(uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
+				lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
+
+	return BP_RESULT_OK;
+}
+
+/**
+ * bios_parser_get_encoder_cap_info
+ *
+ * @brief
+ *  Get encoder capability information (HBR2/HBR3/HDMI-6G enables) for
+ *  the object identified by @object_id.
+ *
+ * @param object_id, Object id
+ * @param info, encoder cap information structure (output)
+ *
+ * @return Bios parser result code
+ *
+ */
+static enum bp_result bios_parser_get_encoder_cap_info(
+	struct dc_bios *dcb,
+	struct graphics_object_id object_id,
+	struct bp_encoder_cap_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_OBJECT *object;
+	ATOM_ENCODER_CAP_RECORD_V2 *record = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	record = get_encoder_cap_record(bp, object);
+	if (!record)
+		return BP_RESULT_NORECORD;
+
+	info->DP_HBR2_EN = record->usHBR2En;
+	info->DP_HBR3_EN = record->usHBR3En;
+	info->HDMI_6GB_EN = record->usHDMI6GEn;
+	return BP_RESULT_OK;
+}
+
+/**
+ * get_encoder_cap_record
+ *
+ * @brief
+ *  Get encoder cap record for the object
+ *
+ * @param object, ATOM object
+ *
+ * @return atom encoder cap record, or NULL if no matching record
+ *
+ * @note
+ *  searches all records attached to @object for the first
+ *  ATOM_ENCODER_CAP_RECORD_TYPE record of sufficient size; the list
+ *  is terminated by LAST_RECORD_TYPE or a zero-sized record
+ */
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	ATOM_COMMON_RECORD_HEADER *header;
+	uint32_t offset;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+					+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		/* advance early; all checks below use the already-fetched
+		 * header, so this is safe */
+		offset += header->ucRecordSize;
+
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+				!header->ucRecordSize)
+			break;
+
+		if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
+			continue;
+
+		if (sizeof(ATOM_ENCODER_CAP_RECORD_V2) <= header->ucRecordSize)
+			return (ATOM_ENCODER_CAP_RECORD_V2 *)header;
+	}
+
+	return NULL;
+}
+
+static uint32_t get_ss_entry_number(
+	struct bios_parser *bp,
+	uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+	struct bios_parser *bp,
+	uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+	struct bios_parser *bp,
+	uint32_t id);
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+	struct bios_parser *bp,
+	uint32_t id);
+
+/**
+ * bios_parser_get_ss_entry_number
+ * Get the number of SpreadSpectrum entries from the ASIC_InternalSS_Info
+ * table (or the legacy SS_Info table when the former is absent) of the
+ * VBIOS that match the SS id derived from @signal.
+ *
+ * @param[in] signal, ASSignalType to be converted to SSid
+ * @return number of SS entries that match the signal; 0 for
+ *         unrecognized table revisions
+ */
+static uint32_t bios_parser_get_ss_entry_number(
+	struct dc_bios *dcb,
+	enum as_signal_type signal)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	uint32_t ss_id = 0;
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision;
+
+	ss_id = signal_to_ss_id(signal);
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+			DATA_TABLES(ASIC_InternalSS_Info));
+	get_atom_data_table_revision(header, &revision);
+
+	/* dispatch on table revision */
+	switch (revision.major) {
+	case 2:
+		switch (revision.minor) {
+		case 1:
+			return get_ss_entry_number(bp, ss_id);
+		default:
+			break;
+		}
+		break;
+	case 3:
+		switch (revision.minor) {
+		case 1:
+			return
+				get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+						bp, ss_id);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * get_ss_entry_number_from_ss_info_tbl
+ * Get the number of spread spectrum entries from the SS_Info table of
+ * the VBIOS.
+ *
+ * @note There can only be one entry for each id in the SS_Info table,
+ *       so the result is 0 or 1.
+ *
+ * @param [in] id, spread spectrum id (ASIC_INTERNAL_SS_* space)
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+	struct bios_parser *bp,
+	uint32_t id)
+{
+	ATOM_SPREAD_SPECTRUM_INFO *tbl;
+	ATOM_COMMON_TABLE_HEADER *header;
+	uint32_t table_size;
+	uint32_t i;
+	uint32_t number = 0;
+	uint32_t id_local = SS_ID_UNKNOWN;
+	struct atom_data_revision revision;
+
+	/* SS_Info table exist */
+	if (!DATA_TABLES(SS_Info))
+		return number;
+
+	header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+			DATA_TABLES(SS_Info));
+	get_atom_data_table_revision(header, &revision);
+
+	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+			DATA_TABLES(SS_Info));
+
+	/* only revision 1.2+ is supported */
+	if (1 != revision.major || 2 > revision.minor)
+		return number;
+
+	/* have to convert from Internal_SS format to SS_Info format */
+	switch (id) {
+	case ASIC_INTERNAL_SS_ON_DP:
+		id_local = SS_ID_DP1;
+		break;
+	case ASIC_INTERNAL_SS_ON_LVDS: {
+		/* for LVDS the SS id comes from the embedded panel info */
+		struct embedded_panel_info panel_info;
+
+		if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+				== BP_RESULT_OK)
+			id_local = panel_info.ss_id;
+		break;
+	}
+	default:
+		break;
+	}
+
+	if (id_local == SS_ID_UNKNOWN)
+		return number;
+
+	table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+					sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+	for (i = 0; i < table_size; i++)
+		if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
+			number = 1;
+			break;
+		}
+
+	return number;
+}
+
+/**
+ * get_ss_entry_number
+ * Get the number of spread spectrum entries from the
+ * ASIC_InternalSS_Info Ver 2.1 or SS_Info table of the VBIOS.
+ * There cannot be more than 1 entry for ASIC_InternalSS_Info Ver 2.1
+ * or SS_Info; only DP and LVDS live in the SS_Info table.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
+{
+	if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+		return get_ss_entry_number_from_ss_info_tbl(bp, id);
+
+	return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id);
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_v2_1
+ * Get the number of spread spectrum entries from the
+ * ASIC_InternalSS_Info table Ver 2.1 of the VBIOS.
+ * There will not be multiple entries for Ver 2.1, so the result is
+ * 0 or 1.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+	struct bios_parser *bp,
+	uint32_t id)
+{
+	ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include;
+	ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+	uint32_t size;
+	uint32_t i;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return 0;
+
+	/* NOTE(review): header_include is not NULL-checked before use -
+	 * confirm GET_IMAGE cannot fail for a non-zero table offset. */
+	header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+			DATA_TABLES(ASIC_InternalSS_Info));
+
+	/* entry count = payload size / entry size */
+	size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+						/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+				&header_include->asSpreadSpectrum[0];
+	for (i = 0; i < size; i++)
+		if (tbl[i].ucClockIndication == (uint8_t)id)
+			return 1;
+
+	return 0;
+}
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
+ * Get the number of SpreadSpectrum entries from the
+ * ASIC_InternalSS_Info table Ver 3.1 of the VBIOS that match @id.
+ * Unlike Ver 2.1, Ver 3.1 may contain multiple entries per id.
+ *
+ * @param[in]  id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+	struct bios_parser *bp,
+	uint32_t id)
+{
+	uint32_t number = 0;
+	ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
+	ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+	uint32_t size;
+	uint32_t i;
+
+	if (!DATA_TABLES(ASIC_InternalSS_Info))
+		return number;
+
+	/* NOTE(review): header_include is not NULL-checked before use -
+	 * confirm GET_IMAGE cannot fail for a non-zero table offset. */
+	header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+			DATA_TABLES(ASIC_InternalSS_Info));
+	size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+			sizeof(ATOM_COMMON_TABLE_HEADER)) /
+					sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+	tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+				&header_include->asSpreadSpectrum[0];
+
+	for (i = 0; i < size; i++)
+		if (tbl[i].ucClockIndication == (uint8_t)id)
+			number++;
+
+	return number;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure (output)
+ * @return Bios parser result code
+ * @note
+ *  to get the GPIO PIN INFO, we need:
+ *  1. get the GPIO_ID from other object table, see GetHPDInfo()
+ *  2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
+ *  offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+	struct dc_bios *dcb,
+	uint32_t gpio_id,
+	struct gpio_pin_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	ATOM_GPIO_PIN_LUT *header;
+	uint32_t count = 0;
+	uint32_t i = 0;
+
+	if (!DATA_TABLES(GPIO_Pin_LUT))
+		return BP_RESULT_BADBIOSTABLE;
+
+	header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
+	if (!header)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* sanity check: table must be large enough for at least the header */
+	if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
+			> le16_to_cpu(header->sHeader.usStructureSize))
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (1 != header->sHeader.ucTableContentRevision)
+		return BP_RESULT_UNSUPPORTED;
+
+	/* entry count = payload size / entry size */
+	count = (le16_to_cpu(header->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+				/ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+	for (i = 0; i < count; ++i) {
+		if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
+			continue;
+
+		info->offset =
+			(uint32_t) le16_to_cpu(header->asGPIO_Pin[i].usGpioPin_AIndex);
+		/* derived register offsets: A is the base; Y, EN, MASK are
+		 * at fixed distances from it */
+		info->offset_y = info->offset + 2;
+		info->offset_en = info->offset + 1;
+		info->offset_mask = info->offset - 1;
+
+		info->mask = (uint32_t) (1 <<
+			header->asGPIO_Pin[i].ucGpioPinBitShift);
+		info->mask_y = info->mask + 2;
+		info->mask_en = info->mask + 1;
+		info->mask_mask = info->mask - 1;
+
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+/*
+ * get_gpio_i2c_info
+ *
+ * Translate an ATOM_I2C_RECORD into a graphics_object_i2c_info by looking
+ * up the engine/line data plus the GPIO register indices and bit shifts in
+ * the GPIO_I2C_Info data table.
+ *
+ * @param bp     bios parser handle
+ * @param record i2c record taken from an object's record list
+ * @param info   [out] engine, line, slave address and gpio register data
+ * @return BP_RESULT_OK on success; BP_RESULT_BADINPUT, _BADBIOSTABLE or
+ *         _UNSUPPORTED on failure
+ */
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+	ATOM_I2C_RECORD *record,
+	struct graphics_object_i2c_info *info)
+{
+	ATOM_GPIO_I2C_INFO *header;
+	uint32_t count = 0;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	/* get the GPIO_I2C info */
+	if (!DATA_TABLES(GPIO_I2C_Info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));
+	if (!header)
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_I2C_ASSIGMENT)
+			> le16_to_cpu(header->sHeader.usStructureSize))
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (1 != header->sHeader.ucTableContentRevision)
+		return BP_RESULT_UNSUPPORTED;
+
+	/* get data count */
+	count = (le16_to_cpu(header->sHeader.usStructureSize)
+			- sizeof(ATOM_COMMON_TABLE_HEADER))
+				/ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+	/* Valid line indices are 0..count-1.  The previous check rejected
+	 * only line > count, so line == count slipped through and indexed
+	 * one entry past the end of asGPIO_Info[] (off-by-one). */
+	if (count <= record->sucI2cId.bfI2C_LineMux)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* get the GPIO_I2C_INFO */
+	info->i2c_hw_assist = record->sucI2cId.bfHW_Capable;
+	info->i2c_line = record->sucI2cId.bfI2C_LineMux;
+	info->i2c_engine_id = record->sucI2cId.bfHW_EngineID;
+	info->i2c_slave_address = record->ucI2CAddr;
+
+	info->gpio_info.clk_mask_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex);
+	info->gpio_info.clk_en_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex);
+	info->gpio_info.clk_y_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex);
+	info->gpio_info.clk_a_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex);
+	info->gpio_info.data_mask_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex);
+	info->gpio_info.data_en_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex);
+	info->gpio_info.data_y_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex);
+	info->gpio_info.data_a_register_index =
+			le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex);
+
+	info->gpio_info.clk_mask_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkMaskShift;
+	info->gpio_info.clk_en_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkEnShift;
+	info->gpio_info.clk_y_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkY_Shift;
+	info->gpio_info.clk_a_shift =
+			header->asGPIO_Info[info->i2c_line].ucClkA_Shift;
+	info->gpio_info.data_mask_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataMaskShift;
+	info->gpio_info.data_en_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataEnShift;
+	info->gpio_info.data_y_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataY_Shift;
+	info->gpio_info.data_a_shift =
+			header->asGPIO_Info[info->i2c_line].ucDataA_Shift;
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * get_bios_object
+ *
+ * Locate the ATOM_OBJECT matching @id inside the per-type object table
+ * (encoder/connector/router/misc) of the object info table, or NULL if
+ * the type is unknown, the table is unavailable, or no object matches.
+ */
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+	struct graphics_object_id id)
+{
+	uint32_t tbl_offset;
+	ATOM_OBJECT_TABLE *object_tbl;
+	uint32_t idx;
+
+	/* Pick the per-type sub-table offset within the object info table. */
+	switch (id.type) {
+	case OBJECT_TYPE_ENCODER:
+		tbl_offset = le16_to_cpu(
+			bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+		break;
+	case OBJECT_TYPE_CONNECTOR:
+		tbl_offset = le16_to_cpu(
+			bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+		break;
+	case OBJECT_TYPE_ROUTER:
+		tbl_offset = le16_to_cpu(
+			bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
+		break;
+	case OBJECT_TYPE_GENERIC:
+		/* The misc object table only exists from revision 1.3 on. */
+		if (bp->object_info_tbl.revision.minor < 3)
+			return NULL;
+		tbl_offset = le16_to_cpu(
+			bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
+		break;
+	default:
+		return NULL;
+	}
+
+	object_tbl = GET_IMAGE(ATOM_OBJECT_TABLE,
+			tbl_offset + bp->object_info_tbl_offset);
+	if (!object_tbl)
+		return NULL;
+
+	/* Linear scan of the table for a matching object id. */
+	for (idx = 0; idx < object_tbl->ucNumberOfObjects; idx++) {
+		ATOM_OBJECT *obj = &object_tbl->asObjects[idx];
+
+		if (dal_graphics_object_id_is_equal(id,
+				object_id_from_bios_object_id(
+					le16_to_cpu(obj->usObjectID))))
+			return obj;
+	}
+
+	return NULL;
+}
+
+/*
+ * get_dest_obj_list
+ *
+ * Return the list of destination object ids recorded for @object.
+ *
+ * SrcDst table layout, as decoded by this file:
+ *   uint8_t  number of source ids
+ *   uint16_t source id array
+ *   uint8_t  number of destination ids
+ *   uint16_t destination id array
+ *
+ * @param id_list [out] pointer into the bios image at the dst id array
+ * @return number of destination ids, or 0 on any failure
+ */
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+	ATOM_OBJECT *object, uint16_t **id_list)
+{
+	uint32_t offset;
+	uint8_t *number;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return 0;
+	}
+
+	offset = le16_to_cpu(object->usSrcDstTableOffset)
+						+ bp->object_info_tbl_offset;
+
+	/* Leading byte: count of source ids. */
+	number = GET_IMAGE(uint8_t, offset);
+	if (!number)
+		return 0;
+
+	/* Skip the source count byte and the source id array to reach the
+	 * destination count byte. */
+	offset += sizeof(uint8_t);
+	offset += sizeof(uint16_t) * (*number);
+
+	number = GET_IMAGE(uint8_t, offset);
+	if ((!number) || (!*number))
+		return 0;
+
+	offset += sizeof(uint8_t);
+	*id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+	if (!*id_list)
+		return 0;
+
+	return *number;
+}
+
+/*
+ * get_src_obj_list
+ *
+ * Return the list of source object ids recorded for @object.  The id
+ * array sits directly after the leading count byte of the object's
+ * SrcDst table.
+ *
+ * @param id_list [out] pointer into the bios image at the src id array
+ * @return number of source ids, or 0 on any failure
+ */
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+	uint16_t **id_list)
+{
+	uint32_t offset;
+	uint8_t *number;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return 0;
+	}
+
+	offset = le16_to_cpu(object->usSrcDstTableOffset)
+					+ bp->object_info_tbl_offset;
+
+	/* Leading byte: count of source ids. */
+	number = GET_IMAGE(uint8_t, offset);
+	if (!number)
+		return 0;
+
+	offset += sizeof(uint8_t);
+	*id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+	if (!*id_list)
+		return 0;
+
+	return *number;
+}
+
+/*
+ * get_dst_number_from_object
+ *
+ * Return how many destination object ids @object's SrcDst table holds,
+ * without fetching the id array itself.  Returns 0 on any failure.
+ */
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint8_t *count_ptr;
+	uint32_t record_offset;
+
+	if (object == NULL) {
+		BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
+		return 0;
+	}
+
+	record_offset = bp->object_info_tbl_offset
+			+ le16_to_cpu(object->usSrcDstTableOffset);
+
+	/* First byte of the SrcDst table: number of source ids. */
+	count_ptr = GET_IMAGE(uint8_t, record_offset);
+	if (count_ptr == NULL)
+		return 0;
+
+	/* Skip the source count byte plus the source id array. */
+	record_offset += sizeof(uint8_t) + sizeof(uint16_t) * (*count_ptr);
+
+	/* Next byte: number of destination ids. */
+	count_ptr = GET_IMAGE(uint8_t, record_offset);
+	if (count_ptr == NULL)
+		return 0;
+
+	return *count_ptr;
+}
+
+/*
+ * device_type_from_device_id
+ *
+ * Decode a single ATOM_DEVICE_*_SUPPORT bit into the driver's
+ * (device_type, enum_id) representation.  Unknown ids yield
+ * DEVICE_TYPE_UNKNOWN with enum_id 0 and trap in a debug build.
+ */
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+	struct device_id result;
+
+	switch (device_id) {
+	case ATOM_DEVICE_LCD1_SUPPORT:
+		result.device_type = DEVICE_TYPE_LCD;
+		result.enum_id = 1;
+		break;
+	case ATOM_DEVICE_LCD2_SUPPORT:
+		result.device_type = DEVICE_TYPE_LCD;
+		result.enum_id = 2;
+		break;
+	case ATOM_DEVICE_CRT1_SUPPORT:
+		result.device_type = DEVICE_TYPE_CRT;
+		result.enum_id = 1;
+		break;
+	case ATOM_DEVICE_CRT2_SUPPORT:
+		result.device_type = DEVICE_TYPE_CRT;
+		result.enum_id = 2;
+		break;
+	case ATOM_DEVICE_DFP1_SUPPORT:
+		result.device_type = DEVICE_TYPE_DFP;
+		result.enum_id = 1;
+		break;
+	case ATOM_DEVICE_DFP2_SUPPORT:
+		result.device_type = DEVICE_TYPE_DFP;
+		result.enum_id = 2;
+		break;
+	case ATOM_DEVICE_DFP3_SUPPORT:
+		result.device_type = DEVICE_TYPE_DFP;
+		result.enum_id = 3;
+		break;
+	case ATOM_DEVICE_DFP4_SUPPORT:
+		result.device_type = DEVICE_TYPE_DFP;
+		result.enum_id = 4;
+		break;
+	case ATOM_DEVICE_DFP5_SUPPORT:
+		result.device_type = DEVICE_TYPE_DFP;
+		result.enum_id = 5;
+		break;
+	case ATOM_DEVICE_DFP6_SUPPORT:
+		result.device_type = DEVICE_TYPE_DFP;
+		result.enum_id = 6;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Invalid device Id */
+		result.device_type = DEVICE_TYPE_UNKNOWN;
+		result.enum_id = 0;
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * get_atom_data_table_revision
+ *
+ * Extract the major/minor revision from an atom data table header into
+ * @tbl_revision.  A missing header reports revision 0.0, which this
+ * file treats as "invalid/unknown".
+ */
+static void get_atom_data_table_revision(
+	ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+	struct atom_data_revision *tbl_revision)
+{
+	if (!tbl_revision)
+		return;
+
+	if (!atom_data_tbl) {
+		/* No table to read: report the invalid revision 0.0. */
+		tbl_revision->major = 0;
+		tbl_revision->minor = 0;
+		return;
+	}
+
+	tbl_revision->major = (uint32_t)
+			GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
+	tbl_revision->minor = (uint32_t)
+			GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
+}
+
+/*
+ * signal_to_ss_id
+ *
+ * Map a signal type onto the matching ASIC internal spread-spectrum id;
+ * unknown signal types map to 0.
+ */
+static uint32_t signal_to_ss_id(enum as_signal_type signal)
+{
+	switch (signal) {
+	case AS_SIGNAL_TYPE_DVI:
+		return ASIC_INTERNAL_SS_ON_TMDS;
+	case AS_SIGNAL_TYPE_HDMI:
+		return ASIC_INTERNAL_SS_ON_HDMI;
+	case AS_SIGNAL_TYPE_LVDS:
+		return ASIC_INTERNAL_SS_ON_LVDS;
+	case AS_SIGNAL_TYPE_DISPLAY_PORT:
+		return ASIC_INTERNAL_SS_ON_DP;
+	case AS_SIGNAL_TYPE_GPU_PLL:
+		return ASIC_INTERNAL_GPUPLL_SS;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * get_support_mask_for_device_id
+ *
+ * Translate a (device type, enum id) pair back into the corresponding
+ * ATOM_DEVICE_*_SUPPORT bit, or 0 when the pair is unrecognized.
+ */
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+	const uint32_t enum_id = device_id.enum_id;
+
+	switch (device_id.device_type) {
+	case DEVICE_TYPE_LCD:
+		if (enum_id == 1)
+			return ATOM_DEVICE_LCD1_SUPPORT;
+		if (enum_id == 2)
+			return ATOM_DEVICE_LCD2_SUPPORT;
+		break;
+	case DEVICE_TYPE_CRT:
+		if (enum_id == 1)
+			return ATOM_DEVICE_CRT1_SUPPORT;
+		if (enum_id == 2)
+			return ATOM_DEVICE_CRT2_SUPPORT;
+		break;
+	case DEVICE_TYPE_DFP:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DEVICE_DFP1_SUPPORT;
+		case 2:
+			return ATOM_DEVICE_DFP2_SUPPORT;
+		case 3:
+			return ATOM_DEVICE_DFP3_SUPPORT;
+		case 4:
+			return ATOM_DEVICE_DFP4_SUPPORT;
+		case 5:
+			return ATOM_DEVICE_DFP5_SUPPORT;
+		case 6:
+			return ATOM_DEVICE_DFP6_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	case DEVICE_TYPE_CV:
+		if (enum_id == 1)
+			return ATOM_DEVICE_CV_SUPPORT;
+		break;
+	case DEVICE_TYPE_TV:
+		if (enum_id == 1)
+			return ATOM_DEVICE_TV1_SUPPORT;
+		break;
+	default:
+		break;
+	}
+
+	/* Unidentified device ID, return empty support mask. */
+	return 0;
+}
+
+/**
+ *  i2c helper used to read the external display connection info table
+ *  from the OPM module EEPROM.
+ */
+
+/*
+ * i2c_read
+ *
+ * Read @length bytes from the device described by @i2c_info into
+ * @buffer over a temporary software-engine DDC channel.  The transfer
+ * is a two-payload transaction: a 2-byte zero offset write, then the
+ * read.  Returns true when the transaction succeeded.
+ */
+static bool i2c_read(
+	struct bios_parser *bp,
+	struct graphics_object_i2c_info *i2c_info,
+	uint8_t *buffer,
+	uint32_t length)
+{
+	struct ddc *ddc;
+	uint8_t offset[2] = { 0, 0 };
+	bool result = false;
+	struct i2c_command cmd;
+	struct gpio_ddc_hw_info hw_info = {
+		i2c_info->i2c_hw_assist,
+		i2c_info->i2c_line };
+
+	/* Create a DDC pin object on the clock-A gpio of this i2c line. */
+	ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
+		i2c_info->gpio_info.clk_a_register_index,
+		(1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
+
+	if (!ddc)
+		return result;
+
+	/*Using SW engine */
+	cmd.engine = I2C_COMMAND_ENGINE_SW;
+	cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+	{
+		struct i2c_payload payloads[] = {
+				{
+						/* >> 1: bios appears to store
+						 * the 8-bit address form and
+						 * the i2c layer wants 7-bit
+						 * - TODO confirm */
+						.address = i2c_info->i2c_slave_address >> 1,
+						.data = offset,
+						.length = sizeof(offset),
+						.write = true
+				},
+				{
+						.address = i2c_info->i2c_slave_address >> 1,
+						.data = buffer,
+						.length = length,
+						.write = false
+				}
+		};
+
+		cmd.payloads = payloads;
+		cmd.number_of_payloads = ARRAY_SIZE(payloads);
+
+		/* TODO route this through drm i2c_adapter */
+		result = dal_i2caux_submit_i2c_command(
+				ddc->ctx->i2caux,
+				ddc,
+				&cmd);
+	}
+
+	dal_gpio_destroy_ddc(&ddc);
+
+	return result;
+}
+
+/**
+ * Read external display connection info table through i2c and
+ * validate the GUID and checksum before trusting the data.
+ *
+ * @return enum bp_result whether all data was successfully read
+ */
+static enum bp_result get_ext_display_connection_info(
+	struct bios_parser *bp,
+	ATOM_OBJECT *opm_object,
+	ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
+{
+	bool config_tbl_present = false;
+	ATOM_I2C_RECORD *i2c_record = NULL;
+	uint32_t i = 0;
+
+	if (opm_object == NULL)
+		return BP_RESULT_BADINPUT;
+
+	/* The OPM object carries the i2c record locating the EEPROM that
+	 * holds the external connection table. */
+	i2c_record = get_i2c_record(bp, opm_object);
+
+	if (i2c_record != NULL) {
+		ATOM_GPIO_I2C_INFO *gpio_i2c_header;
+		struct graphics_object_i2c_info i2c_info;
+
+		gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
+				bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
+
+		if (NULL == gpio_i2c_header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
+				BP_RESULT_OK)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (i2c_read(bp,
+			     &i2c_info,
+			     (uint8_t *)ext_display_connection_info_tbl,
+			     sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
+			config_tbl_present = true;
+		}
+	}
+
+	/* Validate GUID: every byte must match the expected signature. */
+	if (config_tbl_present)
+		for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
+			if (ext_display_connection_info_tbl->ucGuid[i]
+			    != ext_display_connection_guid[i]) {
+				config_tbl_present = false;
+				break;
+			}
+		}
+
+	/* Validate checksum: all table bytes must sum to 0 (mod 256). */
+	if (config_tbl_present) {
+		uint8_t check_sum = 0;
+		uint8_t *buf =
+				(uint8_t *)ext_display_connection_info_tbl;
+
+		for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
+				i++) {
+			check_sum += buf[i];
+		}
+
+		if (check_sum != 0)
+			config_tbl_present = false;
+	}
+
+	if (config_tbl_present)
+		return BP_RESULT_OK;
+	else
+		return BP_RESULT_FAILURE;
+}
+
+/*
+ * enum_first_device_id
+ *
+ * Map any member of a device-id group to the group's first id; e.g. any
+ * DFP device id yields ATOM_DEVICE_DFP1_SUPPORT.  Returns 0 (and logs)
+ * when the id belongs to no known group.
+ */
+static uint32_t enum_first_device_id(uint32_t dev_id)
+{
+	if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
+		return ATOM_DEVICE_CRT1_SUPPORT;
+	if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
+		return ATOM_DEVICE_DFP1_SUPPORT;
+	if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
+		return ATOM_DEVICE_LCD1_SUPPORT;
+	if (dev_id & ATOM_DEVICE_TV_SUPPORT)
+		return ATOM_DEVICE_TV1_SUPPORT;
+	if (dev_id & ATOM_DEVICE_CV_SUPPORT)
+		return ATOM_DEVICE_CV_SUPPORT;
+
+	/* Not a member of any known device group. */
+	dm_error("%s: incorrect input %d\n", __func__, dev_id);
+	return 0;
+}
+
+/*
+ * enum_next_dev_id
+ *
+ * Return the successor of @dev_id inside its device group, or 0 when
+ * @dev_id is the last (or an unknown) member and enumeration is done.
+ */
+static uint32_t enum_next_dev_id(uint32_t dev_id)
+{
+	if (dev_id == ATOM_DEVICE_CRT1_SUPPORT)
+		return ATOM_DEVICE_CRT2_SUPPORT;
+	if (dev_id == ATOM_DEVICE_LCD1_SUPPORT)
+		return ATOM_DEVICE_LCD2_SUPPORT;
+	if (dev_id == ATOM_DEVICE_DFP1_SUPPORT)
+		return ATOM_DEVICE_DFP2_SUPPORT;
+	if (dev_id == ATOM_DEVICE_DFP2_SUPPORT)
+		return ATOM_DEVICE_DFP3_SUPPORT;
+	if (dev_id == ATOM_DEVICE_DFP3_SUPPORT)
+		return ATOM_DEVICE_DFP4_SUPPORT;
+	if (dev_id == ATOM_DEVICE_DFP4_SUPPORT)
+		return ATOM_DEVICE_DFP5_SUPPORT;
+	if (dev_id == ATOM_DEVICE_DFP5_SUPPORT)
+		return ATOM_DEVICE_DFP6_SUPPORT;
+
+	/* Done enumerating through devices. */
+	return 0;
+}
+
+/*
+ * Returns the new device tag record for a patched BIOS object.
+ *
+ * @param ext_display_path - External display path to copy device tag from.
+ * @param device_support   - Bit vector of still-unassigned device IDs.
+ * @param device_tag       - [out] Device tag structure to fill with
+ *                           patched data.
+ *
+ * @return true if a compatible device ID was found, false otherwise.
+ */
+static bool get_patched_device_tag(
+	struct bios_parser *bp,
+	EXT_DISPLAY_PATH *ext_display_path,
+	uint32_t device_support,
+	ATOM_CONNECTOR_DEVICE_TAG *device_tag)
+{
+	uint32_t dev_id;
+	/* Use fallback behaviour if not supported: copy the tag from the
+	 * external display path verbatim. */
+	if (!bp->remap_device_tags) {
+		device_tag->ulACPIDeviceEnum =
+				cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+		device_tag->usDeviceID =
+				cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
+		return true;
+	}
+
+	/* Find the first unused id in the same group, walking the group in
+	 * enumeration order until a still-supported id is found. */
+	dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
+	while (dev_id != 0) {
+		/* Assign this device ID if supported. */
+		if ((device_support & dev_id) != 0) {
+			device_tag->ulACPIDeviceEnum =
+					cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+			device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
+			return true;
+		}
+
+		dev_id = enum_next_dev_id(dev_id);
+	}
+
+	/* No compatible device ID found. */
+	return false;
+}
+
+/*
+ * Adds a device tag to a BIOS object's device tag record if there is a
+ * matching device ID supported.
+ *
+ * @param object           - BIOS object to add the device tag to.
+ * @param ext_display_path - Display path to retrieve base device ID from.
+ * @param device_support   - [in/out] Bit vector of still-available device
+ *                           IDs; an assigned ID is cleared on success.
+ */
+static void add_device_tag_from_ext_display_path(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object,
+	EXT_DISPLAY_PATH *ext_display_path,
+	uint32_t *device_support)
+{
+	/* Get device tag record for object. */
+	ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
+	enum bp_result result =
+			bios_parser_get_device_tag_record(
+					bp, object, &device_tag_record);
+
+	/* NOTE(review): usDeviceTag is compared against a connector object
+	 * id constant here; presumably both constant families use 0 for
+	 * "none" - confirm against atombios definitions. */
+	if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
+			&& (result == BP_RESULT_OK)) {
+		uint8_t index;
+
+		if ((device_tag_record->ucNumberOfDevice == 1) &&
+				(le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
+			/*Workaround bug in current VBIOS releases where
+			 * ucNumberOfDevice = 1 but there is no actual device
+			 * tag data. This w/a is temporary until the updated
+			 * VBIOS is distributed. */
+			device_tag_record->ucNumberOfDevice =
+					device_tag_record->ucNumberOfDevice - 1;
+		}
+
+		/* Attempt to find a matching device ID; the new tag is
+		 * written into the slot just past the current entries. */
+		index = device_tag_record->ucNumberOfDevice;
+		device_tag = &device_tag_record->asDeviceTag[index];
+		if (get_patched_device_tag(
+				bp,
+				ext_display_path,
+				*device_support,
+				device_tag)) {
+			/* Update cached device support to remove assigned ID.
+			 */
+			*device_support &= ~le16_to_cpu(device_tag->usDeviceID);
+			device_tag_record->ucNumberOfDevice++;
+		}
+	}
+}
+
+/*
+ * Read out a single EXT_DISPLAY_PATH from the external display connection
+ * info table.  The specific entry is selected by the enum id encoded in
+ * @bios_object_id.
+ *
+ * @return the EXT_DISPLAY_PATH describing one configuration table entry,
+ *         or NULL when the enum id is out of range.
+ */
+
+#define INVALID_CONNECTOR 0xffff
+
+static EXT_DISPLAY_PATH *get_ext_display_path_entry(
+	ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
+	uint32_t bios_object_id)
+{
+	EXT_DISPLAY_PATH *ext_display_path;
+	/* enum ids are 1-based in the object id; convert to 0-based index. */
+	uint32_t ext_display_path_index =
+			((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
+
+	if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
+		return NULL;
+
+	ext_display_path = &config_table->sPath[ext_display_path_index];
+
+	/* Normalize the "invalid" sentinel to 0 in place so callers can
+	 * uniformly treat 0 as "no connector". */
+	if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
+		ext_display_path->usDeviceConnector = cpu_to_le16(0);
+
+	return ext_display_path;
+}
+
+/*
+ * Get the AUX/DDC LUT record of the input object.
+ *
+ * Walks the object's record list for the first record of type
+ * ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE that is large enough to hold a
+ * complete ATOM_CONNECTOR_AUXDDC_LUT_RECORD.
+ *
+ * @return pointer into the bios image, or NULL when no such record exists.
+ */
+static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	ATOM_COMMON_RECORD_HEADER *header;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER();
+		/* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+					+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		/* A LAST_RECORD_TYPE or zero-size record ends the list. */
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+				0 == header->ucRecordSize)
+			break;
+
+		if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
+				header->ucRecordType &&
+				sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
+				header->ucRecordSize)
+			return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
+
+		offset += header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+/*
+ * Get the HPD pin LUT record of the input object.
+ *
+ * Walks the object's record list for the first record of type
+ * ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE that is large enough to hold a
+ * complete ATOM_CONNECTOR_HPDPIN_LUT_RECORD.  (The original header
+ * comment was a copy/paste of the AUX/DDC variant.)
+ *
+ * @return pointer into the bios image, or NULL when no such record exists.
+ */
+static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
+	struct bios_parser *bp,
+	ATOM_OBJECT *object)
+{
+	uint32_t offset;
+	ATOM_COMMON_RECORD_HEADER *header;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER();
+		/* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->usRecordOffset)
+					+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+		if (!header)
+			return NULL;
+
+		/* A LAST_RECORD_TYPE or zero-size record ends the list. */
+		if (LAST_RECORD_TYPE == header->ucRecordType ||
+				0 == header->ucRecordSize)
+			break;
+
+		if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
+				header->ucRecordType &&
+				sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
+				header->ucRecordSize)
+			return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
+
+		offset += header->ucRecordSize;
+	}
+
+	return NULL;
+}
+
+/*
+ * Patch the VBIOS connector info table with data from an external
+ * display connection info table.  This is necessary to support MXM
+ * boards with an OPM (output personality module).  With these designs,
+ * the VBIOS connector info table specifies an MXM_CONNECTOR with a
+ * unique ID.  The driver retrieves the external connection info table
+ * through i2c and then looks up the connector ID to find the real
+ * connector type (e.g. DFP1).
+ *
+ * @return BP_RESULT_OK on success, BP_RESULT_UNSUPPORTED when there is
+ *         no OPM object / readable connection table, BP_RESULT_FAILURE
+ *         or BP_RESULT_BADBIOSTABLE on inconsistent bios data.
+ */
+static enum bp_result patch_bios_image_from_ext_display_connection_info(
+	struct bios_parser *bp)
+{
+	ATOM_OBJECT_TABLE *connector_tbl;
+	uint32_t connector_tbl_offset;
+	struct graphics_object_id object_id;
+	ATOM_OBJECT *object;
+	ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
+	EXT_DISPLAY_PATH *ext_display_path;
+	ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
+	ATOM_I2C_RECORD *i2c_record = NULL;
+	ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
+	ATOM_HPD_INT_RECORD *hpd_record = NULL;
+	ATOM_OBJECT_TABLE *encoder_table;
+	uint32_t encoder_table_offset;
+	ATOM_OBJECT *opm_object = NULL;
+	uint32_t i = 0;
+	struct graphics_object_id opm_object_id =
+			dal_graphics_object_id_init(
+					GENERIC_ID_MXM_OPM,
+					ENUM_ID_1,
+					OBJECT_TYPE_GENERIC);
+	ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
+	uint32_t cached_device_support =
+			le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
+
+	uint32_t dst_number;
+	uint16_t *dst_object_id_list;
+
+	opm_object = get_bios_object(bp, opm_object_id);
+	if (!opm_object)
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(&ext_display_connection_info_tbl, 0,
+			sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
+
+	connector_tbl_offset = bp->object_info_tbl_offset
+			+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+	connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+	/* Fix: connector_tbl was dereferenced below without a NULL check. */
+	if (!connector_tbl)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* Read Connector info table from EEPROM through i2c */
+	if (get_ext_display_connection_info(bp,
+					    opm_object,
+					    &ext_display_connection_info_tbl) != BP_RESULT_OK) {
+
+		dm_logger_write(bp->base.ctx->logger, LOG_WARNING,
+				"%s: Failed to read Connection Info Table", __func__);
+		return BP_RESULT_UNSUPPORTED;
+	}
+
+	/* Get pointer to AUX/DDC and HPD LUTs */
+	aux_ddc_lut_record =
+			get_ext_connector_aux_ddc_lut_record(bp, opm_object);
+	hpd_pin_lut_record =
+			get_ext_connector_hpd_pin_lut_record(bp, opm_object);
+
+	if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
+		return BP_RESULT_UNSUPPORTED;
+
+	/* Cache support bits for currently unmapped device types. */
+	if (bp->remap_device_tags) {
+		for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
+			uint32_t j;
+			/* Remove support for all non-MXM connectors. */
+			object = &connector_tbl->asObjects[i];
+			object_id = object_id_from_bios_object_id(
+					le16_to_cpu(object->usObjectID));
+			if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+					(CONNECTOR_ID_MXM == object_id.id))
+				continue;
+
+			/* Remove support for all device tags. */
+			if (bios_parser_get_device_tag_record(
+					bp, object, &dev_tag_record) != BP_RESULT_OK)
+				continue;
+
+			for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
+				ATOM_CONNECTOR_DEVICE_TAG *device_tag =
+						&dev_tag_record->asDeviceTag[j];
+				cached_device_support &=
+						~le16_to_cpu(device_tag->usDeviceID);
+			}
+		}
+	}
+
+	/* Find all MXM connector objects and patch them with connector info
+	 * from the external display connection info table. */
+	for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+		uint32_t j;
+
+		object = &connector_tbl->asObjects[i];
+		object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+		if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+				(CONNECTOR_ID_MXM != object_id.id))
+			continue;
+
+		/* Get the correct connection info table entry based on the enum
+		 * id. */
+		ext_display_path = get_ext_display_path_entry(
+				&ext_display_connection_info_tbl,
+				le16_to_cpu(object->usObjectID));
+		if (!ext_display_path)
+			return BP_RESULT_FAILURE;
+
+		/* Patch device connector ID */
+		object->usObjectID =
+				cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
+
+		/* Patch device tag, ulACPIDeviceEnum. */
+		add_device_tag_from_ext_display_path(
+				bp,
+				object,
+				ext_display_path,
+				&cached_device_support);
+
+		/* Patch HPD info */
+		if (ext_display_path->ucExtHPDPINLutIndex <
+				MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
+			hpd_record = get_hpd_record(bp, object);
+			if (hpd_record) {
+				uint8_t index =
+						ext_display_path->ucExtHPDPINLutIndex;
+				hpd_record->ucHPDIntGPIOID =
+						hpd_pin_lut_record->ucHPDPINMap[index];
+			} else {
+				BREAK_TO_DEBUGGER();
+				/* Invalid hpd record */
+				return BP_RESULT_FAILURE;
+			}
+		}
+
+		/* Patch I2C/AUX info.
+		 * Fix: bound-check the AUX/DDC LUT index that is actually
+		 * used below; the old code tested ucExtHPDPINLutIndex here,
+		 * so an out-of-range ucExtAUXDDCLutIndex could read past the
+		 * end of ucAUXDDCMap[]. */
+		if (ext_display_path->ucExtAUXDDCLutIndex <
+				MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
+			i2c_record = get_i2c_record(bp, object);
+			if (i2c_record) {
+				uint8_t index =
+						ext_display_path->ucExtAUXDDCLutIndex;
+				i2c_record->sucI2cId =
+						aux_ddc_lut_record->ucAUXDDCMap[index];
+			} else {
+				BREAK_TO_DEBUGGER();
+				/* Invalid I2C record */
+				return BP_RESULT_FAILURE;
+			}
+		}
+
+		/* Merge with other MXM connectors that map to the same physical
+		 * connector. */
+		for (j = i + 1;
+				j < connector_tbl->ucNumberOfObjects; j++) {
+			ATOM_OBJECT *next_object;
+			struct graphics_object_id next_object_id;
+			EXT_DISPLAY_PATH *next_ext_display_path;
+
+			next_object = &connector_tbl->asObjects[j];
+			next_object_id = object_id_from_bios_object_id(
+					le16_to_cpu(next_object->usObjectID));
+
+			/* Fix: skip everything that is not an MXM connector,
+			 * matching the filter used above.  The old test
+			 * ((type != CONNECTOR) && (id == MXM)) let non-MXM
+			 * objects fall through into the merge logic. */
+			if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) ||
+					(CONNECTOR_ID_MXM != next_object_id.id))
+				continue;
+
+			next_ext_display_path = get_ext_display_path_entry(
+					&ext_display_connection_info_tbl,
+					le16_to_cpu(next_object->usObjectID));
+
+			if (next_ext_display_path == NULL)
+				return BP_RESULT_FAILURE;
+
+			/* Merge if using same connector. */
+			if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
+					le16_to_cpu(ext_display_path->usDeviceConnector)) &&
+					(le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
+				/* Clear duplicate connector from table. */
+				next_object->usObjectID = cpu_to_le16(0);
+				add_device_tag_from_ext_display_path(
+						bp,
+						object,
+						ext_display_path,
+						&cached_device_support);
+			}
+		}
+	}
+
+	/* Find all encoders which have an MXM object as their destination.
+	 *  Replace the MXM object with the real connector Id from the external
+	 *  display connection info table */
+
+	encoder_table_offset = bp->object_info_tbl_offset
+			+ le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+	encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+	/* Fix: encoder_table was dereferenced below without a NULL check. */
+	if (!encoder_table)
+		return BP_RESULT_BADBIOSTABLE;
+
+	for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
+		uint32_t j;
+
+		object = &encoder_table->asObjects[i];
+
+		dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
+
+		for (j = 0; j < dst_number; j++) {
+			object_id = object_id_from_bios_object_id(
+					dst_object_id_list[j]);
+
+			if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+					(CONNECTOR_ID_MXM != object_id.id))
+				continue;
+
+			/* Get the correct connection info table entry based on
+			 * the enum id. */
+			ext_display_path =
+					get_ext_display_path_entry(
+							&ext_display_connection_info_tbl,
+							dst_object_id_list[j]);
+
+			if (ext_display_path == NULL)
+				return BP_RESULT_FAILURE;
+
+			dst_object_id_list[j] =
+					le16_to_cpu(ext_display_path->usDeviceConnector);
+		}
+	}
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * Scan the VBIOS connector table for MXM connectors and null entries.
+ * If either is found, clone the bios image into a writable local copy,
+ * patch the MXM entries from the external display connection info
+ * table (retrieved over i2c from the OPM module), and finally compact
+ * the connector table so valid entries sit at the beginning.
+ * (The previous header comment here was a copy of the one above
+ * patch_bios_image_from_ext_display_connection_info().)
+ */
+static void process_ext_display_connection_info(struct bios_parser *bp)
+{
+	ATOM_OBJECT_TABLE *connector_tbl;
+	uint32_t connector_tbl_offset;
+	struct graphics_object_id object_id;
+	ATOM_OBJECT *object;
+	bool mxm_connector_found = false;
+	bool null_entry_found = false;
+	uint32_t i = 0;
+
+	connector_tbl_offset = bp->object_info_tbl_offset +
+			le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+	connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+	/* Fix: the table pointer was dereferenced without a NULL check. */
+	if (connector_tbl == NULL) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	/* Look for MXM connectors to determine whether we need patch the VBIOS
+	 * connector info table. Look for null entries to determine whether we
+	 * need to compact connector table. */
+	for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+		object = &connector_tbl->asObjects[i];
+		object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+
+		if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
+				(CONNECTOR_ID_MXM == object_id.id)) {
+			/* Once we found MXM connector - we can break */
+			mxm_connector_found = true;
+			break;
+		} else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
+			/* We need to continue looping - to check if MXM
+			 * connector present */
+			null_entry_found = true;
+		}
+	}
+
+	/* Patch BIOS image */
+	if (mxm_connector_found || null_entry_found) {
+		uint32_t connectors_num = 0;
+		uint8_t *original_bios;
+		/* Step 1: Replace bios image with the new copy which will be
+		 * patched */
+		bp->base.bios_local_image = kzalloc(bp->base.bios_size,
+						    GFP_KERNEL);
+		if (bp->base.bios_local_image == NULL) {
+			BREAK_TO_DEBUGGER();
+			/* Failed to alloc bp->base.bios_local_image */
+			return;
+		}
+
+		memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
+		original_bios = bp->base.bios;
+		bp->base.bios = bp->base.bios_local_image;
+		connector_tbl =
+				GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+		/* Fix: the re-fetched pointer (now into the local copy) was
+		 * used unchecked; bail out and leave the unpatched copy in
+		 * place, which is identical to the original image. */
+		if (connector_tbl == NULL) {
+			BREAK_TO_DEBUGGER();
+			return;
+		}
+
+		/* Step 2: (only if MXM connector found) Patch BIOS image with
+		 * info from external module */
+		if (mxm_connector_found &&
+		    patch_bios_image_from_ext_display_connection_info(bp) !=
+						BP_RESULT_OK) {
+			/* Patching the bios image has failed. We will copy
+			 * again original image provided and afterwards
+			 * only remove null entries */
+			memmove(
+					bp->base.bios_local_image,
+					original_bios,
+					bp->base.bios_size);
+		}
+
+		/* Step 3: Compact connector table (remove null entries, valid
+		 * entries moved to beginning) */
+		for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+			object = &connector_tbl->asObjects[i];
+			object_id = object_id_from_bios_object_id(
+					le16_to_cpu(object->usObjectID));
+
+			if (OBJECT_TYPE_CONNECTOR != object_id.type)
+				continue;
+
+			if (i != connectors_num) {
+				memmove(
+						&connector_tbl->
+						asObjects[connectors_num],
+						object,
+						sizeof(ATOM_OBJECT));
+			}
+			++connectors_num;
+		}
+		connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
+	}
+}
+
+static void bios_parser_post_init(struct dc_bios *dcb)
+{
+	/* Run the one-time MXM/OPM connector table patching after init. */
+	process_ext_display_connection_info(BP_FROM_DCB(dcb));
+}
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ *  update critical state bit in VBIOS scratch register
+ *
+ * @param
+ *  dcb   - bios owner
+ *  state - true to set the critical state bit, false to reset it
+ */
+static void bios_parser_set_scratch_critical_state(
+	struct dc_bios *dcb,
+	bool state)
+{
+	bios_set_scratch_critical_state(dcb, state);
+}
+
+/*
+ * get_integrated_info_v8
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v8(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
+	uint32_t i;
+
+	info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
+			bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+	if (info_v8 == NULL)
+		return BP_RESULT_BADBIOSTABLE;
+	/* BIOS stores these clocks in 10 kHz units; convert to kHz */
+	info->boot_up_engine_clock = le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
+	info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
+	info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->disp_clk_voltage[i].max_supported_clk =
+			le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
+				    ulMaximumSupportedCLK) * 10;
+		info->disp_clk_voltage[i].voltage_index =
+			le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
+	}
+
+	info->boot_up_req_display_vector =
+		le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
+	info->gpu_cap_info =
+		le32_to_cpu(info_v8->ulGPUCapInfo);
+
+	/*
+	 * system_config: Bit[0] = 0 : PCIE power gating disabled
+	 *                       = 1 : PCIE power gating enabled
+	 *                Bit[1] = 0 : DDR-PLL shut down disabled
+	 *                       = 1 : DDR-PLL shut down enabled
+	 *                Bit[2] = 0 : DDR-PLL power down disabled
+	 *                       = 1 : DDR-PLL power down enabled
+	 */
+	info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
+	info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
+	info->boot_up_nb_voltage =
+		le16_to_cpu(info_v8->usBootUpNBVoltage);
+	info->ext_disp_conn_info_offset =
+		le16_to_cpu(info_v8->usExtDispConnInfoOffset);
+	info->memory_type = info_v8->ucMemoryType;
+	info->ma_channel_number = info_v8->ucUMAChannelNumber;
+	info->gmc_restore_reset_time =
+		le32_to_cpu(info_v8->ulGMCRestoreResetTime);
+
+	/* minimum_n_clk = lowest of the four NBP-state NClk frequencies */
+	info->minimum_n_clk =
+		le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
+	for (i = 1; i < 4; ++i)
+		info->minimum_n_clk =
+			info->minimum_n_clk < le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
+			info->minimum_n_clk : le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);
+
+	info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
+	info->ddr_dll_power_up_time =
+		le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
+	info->ddr_pll_power_up_time =
+		le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
+	info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
+	info->lvds_ss_percentage =
+		le16_to_cpu(info_v8->usLvdsSSPercentage);
+	info->lvds_sspread_rate_in_10hz =
+		le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
+	info->hdmi_ss_percentage =
+		le16_to_cpu(info_v8->usHDMISSPercentage);
+	info->hdmi_sspread_rate_in_10hz =
+		le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
+	info->dvi_ss_percentage =
+		le16_to_cpu(info_v8->usDVISSPercentage);
+	info->dvi_sspread_rate_in_10_hz =
+		le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);
+
+	info->max_lvds_pclk_freq_in_single_link =
+		le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
+	info->lvds_misc = info_v8->ucLvdsMisc;
+	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+		info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+		info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+		info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+		info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+		info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+		info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+	info->lvds_off_to_on_delay_in_4ms =
+		info_v8->ucLVDSOffToOnDelay_in4Ms;
+	info->lvds_bit_depth_control_val =
+		le32_to_cpu(info_v8->ulLCDBitDepthControlVal);
+
+	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->avail_s_clk[i].supported_s_clk =
+			le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+		info->avail_s_clk[i].voltage_index =
+			le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
+		info->avail_s_clk[i].voltage_id =
+			le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
+	}
+
+	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+		info->ext_disp_conn_info.gu_id[i] =
+			info_v8->sExtDispConnInfo.ucGuid[i];
+	}
+
+	/* copy the external display connection paths verbatim */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+		info->ext_disp_conn_info.path[i].device_connector_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+		info->ext_disp_conn_info.path[i].device_tag =
+			le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
+		info->ext_disp_conn_info.path[i].device_acpi_enum =
+			le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+			info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+			info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+		info->ext_disp_conn_info.path[i].channel_mapping.raw =
+			info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
+	}
+	info->ext_disp_conn_info.checksum =
+		info_v8->sExtDispConnInfo.ucChecksum;
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * get_integrated_info_v9
+ *
+ * @brief
+ * Get V9 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v9(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
+	uint32_t i;
+
+	info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
+			bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+	if (!info_v9)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* BIOS stores these clocks in 10 kHz units; convert to kHz */
+	info->boot_up_engine_clock = le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
+	info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
+	info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;
+
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->disp_clk_voltage[i].max_supported_clk =
+			le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
+		info->disp_clk_voltage[i].voltage_index =
+			le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
+	}
+
+	info->boot_up_req_display_vector =
+		le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
+	info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);
+
+	/*
+	 * system_config: Bit[0] = 0 : PCIE power gating disabled
+	 *                       = 1 : PCIE power gating enabled
+	 *                Bit[1] = 0 : DDR-PLL shut down disabled
+	 *                       = 1 : DDR-PLL shut down enabled
+	 *                Bit[2] = 0 : DDR-PLL power down disabled
+	 *                       = 1 : DDR-PLL power down enabled
+	 */
+	info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
+	info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
+	info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
+	info->ext_disp_conn_info_offset = le16_to_cpu(info_v9->usExtDispConnInfoOffset);
+	info->memory_type = info_v9->ucMemoryType;
+	info->ma_channel_number = info_v9->ucUMAChannelNumber;
+	info->gmc_restore_reset_time = le32_to_cpu(info_v9->ulGMCRestoreResetTime);
+
+	/* minimum_n_clk = lowest of the four NBP-state NClk frequencies */
+	info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
+	for (i = 1; i < 4; ++i)
+		info->minimum_n_clk =
+			info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
+			info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);
+
+	info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
+	info->ddr_dll_power_up_time = le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
+	info->ddr_pll_power_up_time = le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
+	info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);
+	info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
+	info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
+	info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
+	info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
+	info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
+	info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
+
+	info->max_lvds_pclk_freq_in_single_link =
+		le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
+	info->lvds_misc = info_v9->ucLvdsMisc;
+	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+		info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+		info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+		info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+		info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+		info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+		info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+	info->lvds_off_to_on_delay_in_4ms =
+		info_v9->ucLVDSOffToOnDelay_in4Ms;
+	info->lvds_bit_depth_control_val =
+		le32_to_cpu(info_v9->ulLCDBitDepthControlVal);
+
+	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->avail_s_clk[i].supported_s_clk =
+			le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+		info->avail_s_clk[i].voltage_index =
+			le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
+		info->avail_s_clk[i].voltage_id =
+			le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
+	}
+
+	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+		info->ext_disp_conn_info.gu_id[i] =
+			info_v9->sExtDispConnInfo.ucGuid[i];
+	}
+
+	/* copy the external display connection paths verbatim */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+		info->ext_disp_conn_info.path[i].device_connector_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+			object_id_from_bios_object_id(
+				le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+		info->ext_disp_conn_info.path[i].device_tag =
+			le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
+		info->ext_disp_conn_info.path[i].device_acpi_enum =
+			le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+			info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+			info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+		info->ext_disp_conn_info.path[i].channel_mapping.raw =
+			info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
+	}
+	info->ext_disp_conn_info.checksum =
+		info_v9->sExtDispConnInfo.ucChecksum;
+
+	return BP_RESULT_OK;
+}
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ * (dispatches to the v1.8 or v1.9 reader, then normalizes the result)
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+	ATOM_COMMON_TABLE_HEADER *header;
+	struct atom_data_revision revision;
+
+	if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
+		header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+				bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+		get_atom_data_table_revision(header, &revision);
+
+		/* Don't need to check major revision as they are all 1 */
+		switch (revision.minor) {
+		case 8:
+			result = get_integrated_info_v8(bp, info);
+			break;
+		case 9:
+			result = get_integrated_info_v9(bp, info);
+			break;
+		default:
+			return result;
+
+		}
+	}
+
+	/* Sort voltage table from low to high
+	 * (in-place insertion sort keyed on max_supported_clk) */
+	if (result == BP_RESULT_OK) {
+		struct clock_voltage_caps temp = {0, 0};
+		uint32_t i;
+		uint32_t j;
+
+		for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+			for (j = i; j > 0; --j) {
+				if (
+						info->disp_clk_voltage[j].max_supported_clk <
+						info->disp_clk_voltage[j-1].max_supported_clk) {
+					/* swap j and j - 1*/
+					temp = info->disp_clk_voltage[j-1];
+					info->disp_clk_voltage[j-1] =
+							info->disp_clk_voltage[j];
+					info->disp_clk_voltage[j] = temp;
+				}
+			}
+		}
+
+	}
+
+	return result;
+}
+
+/* Allocate and populate an integrated_info block; returns NULL on
+ * allocation failure or when the BIOS table cannot be parsed.
+ * Caller owns the returned memory (freed via kfree). */
+static struct integrated_info *bios_parser_create_integrated_info(
+	struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct integrated_info *info = NULL;
+
+	info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+	if (info == NULL) {
+		ASSERT_CRITICAL(0);
+		return NULL;
+	}
+
+	if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+		return info;
+
+	/* parse failed - don't hand back a half-filled struct */
+	kfree(info);
+
+	return NULL;
+}
+
+/******************************************************************************/
+
+/* VBIOS function dispatch table exposed through dc_bios. */
+static const struct dc_vbios_funcs vbios_funcs = {
+	.get_connectors_number = bios_parser_get_connectors_number,
+
+	.get_encoder_id = bios_parser_get_encoder_id,
+
+	.get_connector_id = bios_parser_get_connector_id,
+
+	.get_dst_number = bios_parser_get_dst_number,
+
+	.get_src_obj = bios_parser_get_src_obj,
+
+	.get_dst_obj = bios_parser_get_dst_obj,
+
+	.get_i2c_info = bios_parser_get_i2c_info,
+
+	.get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+	.get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+	.get_hpd_info = bios_parser_get_hpd_info,
+
+	.get_device_tag = bios_parser_get_device_tag,
+
+	.get_firmware_info = bios_parser_get_firmware_info,
+
+	.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+	.get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+	.get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+	.get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+	.get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+	/* bios scratch register communication */
+	.is_accelerated_mode = bios_is_accelerated_mode,
+
+	.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+	.is_device_id_supported = bios_parser_is_device_id_supported,
+
+	/* COMMANDS */
+	.encoder_control = bios_parser_encoder_control,
+
+	.transmitter_control = bios_parser_transmitter_control,
+
+	.crt_control = bios_parser_crt_control,  /* not used in DAL3.  keep for now in case we need to support VGA on Bonaire */
+
+	.enable_crtc = bios_parser_enable_crtc,
+
+	.adjust_pixel_clock = bios_parser_adjust_pixel_clock,
+
+	.set_pixel_clock = bios_parser_set_pixel_clock,
+
+	.set_dce_clock = bios_parser_set_dce_clock,
+
+	.enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,
+
+	.program_crtc_timing = bios_parser_program_crtc_timing, /* still use.  should probably retire and program directly */
+
+	.crtc_source_select = bios_parser_crtc_source_select,  /* still use.  should probably retire and program directly */
+
+	.program_display_engine_pll = bios_parser_program_display_engine_pll,
+
+	.enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+	/* SW init and patch */
+	.post_init = bios_parser_post_init,  /* patch vbios table for mxm module by reading i2c */
+
+	.bios_parser_destroy = bios_parser_destroy,
+};
+
+/* Initialize a bios_parser from the raw BIOS image: validate the ROM
+ * header, locate the master data table and object info table, select the
+ * object table layout (v1.1 vs v1.3+), then set up command tables and
+ * integrated info.  Returns false if any required table is missing or of
+ * an unsupported revision. */
+static bool bios_parser_construct(
+	struct bios_parser *bp,
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	uint16_t *rom_header_offset = NULL;
+	ATOM_ROM_HEADER *rom_header = NULL;
+	ATOM_OBJECT_HEADER *object_info_tbl;
+	struct atom_data_revision tbl_rev = {0};
+
+	if (!init)
+		return false;
+
+	if (!init->bios)
+		return false;
+
+	bp->base.funcs = &vbios_funcs;
+	bp->base.bios = init->bios;
+	/* image size is stored at a fixed offset in units of 512 bytes */
+	bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;
+
+	bp->base.ctx = init->ctx;
+	bp->base.bios_local_image = NULL;
+
+	rom_header_offset =
+	GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
+
+	if (!rom_header_offset)
+		return false;
+
+	rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);
+
+	if (!rom_header)
+		return false;
+
+	get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
+	/* revisions >= 2.2 are handled by the atomfirmware parser instead;
+	 * NOTE(review): a major rev > 2 with minor < 2 would pass this
+	 * check - confirm that cannot occur for tables this parser sees */
+	if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
+		return false;
+
+	bp->master_data_tbl =
+	GET_IMAGE(ATOM_MASTER_DATA_TABLE,
+		rom_header->usMasterDataTableOffset);
+
+	if (!bp->master_data_tbl)
+		return false;
+
+	bp->object_info_tbl_offset = DATA_TABLES(Object_Header);
+
+	if (!bp->object_info_tbl_offset)
+		return false;
+
+	object_info_tbl =
+	GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);
+
+	if (!object_info_tbl)
+		return false;
+
+	get_atom_data_table_revision(&object_info_tbl->sHeader,
+		&bp->object_info_tbl.revision);
+
+	/* pick the object table view matching its revision */
+	if (bp->object_info_tbl.revision.major == 1
+		&& bp->object_info_tbl.revision.minor >= 3) {
+		ATOM_OBJECT_HEADER_V3 *tbl_v3;
+
+		tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
+			bp->object_info_tbl_offset);
+		if (!tbl_v3)
+			return false;
+
+		bp->object_info_tbl.v1_3 = tbl_v3;
+	} else if (bp->object_info_tbl.revision.major == 1
+		&& bp->object_info_tbl.revision.minor >= 1)
+		bp->object_info_tbl.v1_1 = object_info_tbl;
+	else
+		return false;
+
+	dal_bios_parser_init_cmd_tbl(bp);
+	dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
+
+	bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+	return true;
+}
+
+/******************************************************************************/

+ 33 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_H__
+#define __DAL_BIOS_PARSER_H__
+
+struct dc_bios *bios_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version);
+
+#endif

+ 1934 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c

@@ -0,0 +1,1934 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "dc_bios_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table2.h"
+
+#include "bios_parser_helper.h"
+#include "command_table_helper2.h"
+#include "bios_parser2.h"
+#include "bios_parser_types_internal2.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+#define LAST_RECORD_TYPE 0xff
+
+
+/* Bit-field view of an ATOM i2c id byte: line mux (bits 0-3), HW engine
+ * id (bits 4-6), HW-capable flag (bit 7), plus the raw access byte. */
+struct i2c_id_config_access {
+	uint8_t bfI2C_LineMux:4;
+	uint8_t bfHW_EngineID:3;
+	uint8_t bfHW_Capable:1;
+	uint8_t ucAccess;
+};
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+	struct atom_i2c_record *record,
+	struct graphics_object_i2c_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+	struct dc_bios *dcb,
+	struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+	struct dc_bios *dcb,
+	struct graphics_object_id object_id,
+	struct bp_encoder_cap_info *info);
+
+static enum bp_result get_firmware_info_v3_1(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info);
+
+static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
+		struct atom_display_object_path_v2 *object);
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+	struct bios_parser *bp,
+	struct atom_display_object_path_v2 *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
+
+
+/* Release the parser-owned allocations (patched BIOS copy, integrated
+ * info); kfree(NULL) is a no-op so both may legitimately be NULL. */
+static void destruct(struct bios_parser *bp)
+{
+	kfree(bp->base.bios_local_image);
+	kfree(bp->base.integrated_info);
+}
+
+/* Tear down a firmware parser instance and NULL the caller's pointer so
+ * it cannot be used after free. */
+static void firmware_parser_destroy(struct dc_bios **dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+	if (!bp) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	destruct(bp);
+
+	kfree(bp);
+	*dcb = NULL;
+}
+
+/* Extract major/minor revision from an atom table header.  The output is
+ * zeroed first (0.0 = invalid) so a NULL table yields the invalid
+ * revision rather than garbage. */
+static void get_atom_data_table_revision(
+	struct atom_common_table_header *atom_data_tbl,
+	struct atom_data_revision *tbl_revision)
+{
+	if (!tbl_revision)
+		return;
+
+	/* initialize the revision to 0 which is invalid revision */
+	tbl_revision->major = 0;
+	tbl_revision->minor = 0;
+
+	if (!atom_data_tbl)
+		return;
+
+	/* only the low 6 bits carry the revision number */
+	tbl_revision->major =
+			(uint32_t) atom_data_tbl->format_revision & 0x3f;
+	tbl_revision->minor =
+			(uint32_t) atom_data_tbl->content_revision & 0x3f;
+}
+
+/* BIOS object table displaypath is per connector.
+ * There is extra path not for connector. BIOS fill its encoderid as 0,
+ * so only paths with a non-zero encoder object id are counted.
+ */
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	unsigned int count = 0;
+	unsigned int i;
+
+	for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+		if (bp->object_info_tbl.v1_4->display_path[i].encoderobjid != 0)
+			count++;
+	}
+	return count;
+}
+
+/* Return the encoder object id for display path i, or the
+ * UNKNOWN-initialized id when i is out of range. */
+static struct graphics_object_id bios_parser_get_encoder_id(
+	struct dc_bios *dcb,
+	uint32_t i)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct graphics_object_id object_id = dal_graphics_object_id_init(
+		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+	if (bp->object_info_tbl.v1_4->number_of_path > i)
+		object_id = object_id_from_bios_object_id(
+		bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+
+	return object_id;
+}
+
+/* Return the connector object id for display path i; paths whose encoder
+ * or display object id is 0 (generic, non-connector entries) yield the
+ * UNKNOWN-initialized id, as does an out-of-range index. */
+static struct graphics_object_id bios_parser_get_connector_id(
+	struct dc_bios *dcb,
+	uint8_t i)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct graphics_object_id object_id = dal_graphics_object_id_init(
+		0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+	struct object_info_table *tbl = &bp->object_info_tbl;
+	struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4;
+
+	if (v1_4->number_of_path > i) {
+		/* If display_objid is generic object id,  the encoderObj
+		 * /extencoderobjId should be 0
+		 */
+		if (v1_4->display_path[i].encoderobjid != 0 &&
+				v1_4->display_path[i].display_objid != 0)
+			object_id = object_id_from_bios_object_id(
+					v1_4->display_path[i].display_objid);
+	}
+
+	return object_id;
+}
+
+
+/*  TODO:  GetNumberOfSrc*/
+
+/* Number of destination objects for a given object type. */
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+	struct graphics_object_id id)
+{
+	/* connector has 1 Dest, encoder has 0 Dest */
+	switch (id.type) {
+	case OBJECT_TYPE_ENCODER:
+		return 0;
+	case OBJECT_TYPE_CONNECTOR:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*  removed getSrcObjList, getDestObjList*/
+
+
+/* Look up the source object feeding object_id.  For an encoder the
+ * source is hardcoded to the GPU object (0x1100); for a connector it is
+ * the encoder on the same display path.  `index` is currently unused -
+ * every matching path overwrites *src_object_id (see TODOs below).
+ * Returns BP_RESULT_OK for known types, BP_RESULT_BADINPUT otherwise. */
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+	struct graphics_object_id object_id, uint32_t index,
+	struct graphics_object_id *src_object_id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	unsigned int i;
+	enum bp_result  bp_result = BP_RESULT_BADINPUT;
+	struct graphics_object_id obj_id = {0};
+	struct object_info_table *tbl = &bp->object_info_tbl;
+
+	if (!src_object_id)
+		return bp_result;
+
+	switch (object_id.type) {
+	/* Encoder's Source is GPU.  BIOS does not provide GPU, since all
+	 * displaypaths point to same GPU (0x1100).  Hardcode GPU object type
+	 */
+	case OBJECT_TYPE_ENCODER:
+		/* TODO: since num of src must be less than 2.
+		 * If found in for loop, should break.
+		 * DAL2 implementation may be changed too
+		 */
+		for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+			obj_id = object_id_from_bios_object_id(
+			tbl->v1_4->display_path[i].encoderobjid);
+			if (object_id.type == obj_id.type &&
+					object_id.id == obj_id.id &&
+						object_id.enum_id ==
+							obj_id.enum_id) {
+				*src_object_id =
+				object_id_from_bios_object_id(0x1100);
+				/* break; */
+			}
+		}
+		bp_result = BP_RESULT_OK;
+		break;
+	case OBJECT_TYPE_CONNECTOR:
+		for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+			obj_id = object_id_from_bios_object_id(
+				tbl->v1_4->display_path[i].display_objid);
+
+			if (object_id.type == obj_id.type &&
+				object_id.id == obj_id.id &&
+					object_id.enum_id == obj_id.enum_id) {
+				*src_object_id =
+				object_id_from_bios_object_id(
+				tbl->v1_4->display_path[i].encoderobjid);
+				/* break; */
+			}
+		}
+		bp_result = BP_RESULT_OK;
+		break;
+	default:
+		break;
+	}
+
+	return bp_result;
+}
+
+/* Look up the destination object driven by object_id: for an encoder
+ * this is the display/connector object on the same path.  `index` is
+ * currently unused.  NOTE(review): *dest_object_id is left unmodified
+ * when no path matches, yet BP_RESULT_OK is still returned - callers
+ * presumably pre-initialize it; confirm. */
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+	struct graphics_object_id object_id, uint32_t index,
+	struct graphics_object_id *dest_object_id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	unsigned int i;
+	enum bp_result  bp_result = BP_RESULT_BADINPUT;
+	struct graphics_object_id obj_id = {0};
+	struct object_info_table *tbl = &bp->object_info_tbl;
+
+	if (!dest_object_id)
+		return BP_RESULT_BADINPUT;
+
+	switch (object_id.type) {
+	case OBJECT_TYPE_ENCODER:
+		/* TODO: since num of src must be less than 2.
+		 * If found in for loop, should break.
+		 * DAL2 implementation may be changed too
+		 */
+		for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+			obj_id = object_id_from_bios_object_id(
+				tbl->v1_4->display_path[i].encoderobjid);
+			if (object_id.type == obj_id.type &&
+					object_id.id == obj_id.id &&
+						object_id.enum_id ==
+							obj_id.enum_id) {
+				*dest_object_id =
+					object_id_from_bios_object_id(
+				tbl->v1_4->display_path[i].display_objid);
+				/* break; */
+			}
+		}
+		bp_result = BP_RESULT_OK;
+		break;
+	default:
+		break;
+	}
+
+	return bp_result;
+}
+
+
+/* from graphics_object_id, find display path which includes the object_id.
+ * Returns a pointer into the BIOS object table, or NULL if not found /
+ * unsupported type.  NOTE(review): the switch cases intentionally lack
+ * break statements - an encoder id that matches no path falls through to
+ * the connector scan and finally to the default NULL return; adding a
+ * break after either loop would leave the function without a return.
+ */
+static struct atom_display_object_path_v2 *get_bios_object(
+	struct bios_parser *bp,
+	struct graphics_object_id id)
+{
+	unsigned int i;
+	struct graphics_object_id obj_id = {0};
+
+	switch (id.type) {
+	case OBJECT_TYPE_ENCODER:
+		for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+			obj_id = object_id_from_bios_object_id(
+			bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+			if (id.type == obj_id.type &&
+					id.id == obj_id.id &&
+						id.enum_id == obj_id.enum_id)
+				return
+				&bp->object_info_tbl.v1_4->display_path[i];
+		}
+		/* fall through */
+	case OBJECT_TYPE_CONNECTOR:
+	case OBJECT_TYPE_GENERIC:
+		/* Both Generic and Connector Object ID
+		 * will be stored on display_objid
+		*/
+		for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+			obj_id = object_id_from_bios_object_id(
+			bp->object_info_tbl.v1_4->display_path[i].display_objid
+			);
+			if (id.type == obj_id.type &&
+					id.id == obj_id.id &&
+						id.enum_id == obj_id.enum_id)
+				return
+				&bp->object_info_tbl.v1_4->display_path[i];
+		}
+		/* fall through */
+	default:
+		return NULL;
+	}
+}
+
+/* Walk the record list of the display path for `id` and fill *info from
+ * the first usable ATOM i2c record.  Returns BP_RESULT_OK on success,
+ * BADINPUT for a bad id/NULL info, BADBIOSTABLE for a corrupt record
+ * list, NORECORD when no i2c record exists. */
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct graphics_object_i2c_info *info)
+{
+	uint32_t offset;
+	struct atom_display_object_path_v2 *object;
+	struct atom_common_record_header *header;
+	struct atom_i2c_record *record;
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	offset = object->disp_recordoffset + bp->object_info_tbl_offset;
+
+	/* records are variable-length; advance by each record's size until
+	 * the terminator (LAST_RECORD_TYPE or zero size) */
+	for (;;) {
+		header = GET_IMAGE(struct atom_common_record_header, offset);
+
+		if (!header)
+			return BP_RESULT_BADBIOSTABLE;
+
+		if (header->record_type == LAST_RECORD_TYPE ||
+			!header->record_size)
+			break;
+
+		if (header->record_type == ATOM_I2C_RECORD_TYPE
+			&& sizeof(struct atom_i2c_record) <=
+							header->record_size) {
+			/* get the I2C info */
+			record = (struct atom_i2c_record *) header;
+
+			if (get_gpio_i2c_info(bp, record, info) ==
+								BP_RESULT_OK)
+				return BP_RESULT_OK;
+		}
+
+		offset += header->record_size;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+/* Resolve an ATOM i2c record against the GPIO pin LUT and fill *info
+ * (hw-assist flag, line, engine id, slave address, clock register/shift).
+ * Returns BP_RESULT_OK on success, BADINPUT/BADBIOSTABLE/UNSUPPORTED on
+ * the corresponding failure. */
+static enum bp_result get_gpio_i2c_info(
+	struct bios_parser *bp,
+	struct atom_i2c_record *record,
+	struct graphics_object_i2c_info *info)
+{
+	struct atom_gpio_pin_lut_v2_1 *header;
+	uint32_t count = 0;
+	unsigned int table_index = 0;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	/* get the GPIO_I2C info */
+	if (!DATA_TABLES(gpio_pin_lut))
+		return BP_RESULT_BADBIOSTABLE;
+
+	header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+					DATA_TABLES(gpio_pin_lut));
+	if (!header)
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (sizeof(struct atom_common_table_header) +
+			sizeof(struct atom_gpio_pin_assignment)	>
+			le16_to_cpu(header->table_header.structuresize))
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* TODO: is version change? */
+	if (header->table_header.content_revision != 1)
+		return BP_RESULT_UNSUPPORTED;
+
+	/* get data count */
+	count = (le16_to_cpu(header->table_header.structuresize)
+			- sizeof(struct atom_common_table_header))
+				/ sizeof(struct atom_gpio_pin_assignment);
+
+	table_index = record->i2c_id  & I2C_HW_LANE_MUX;
+
+	/* If the encoded index is outside the LUT, fall back to scanning
+	 * for an entry whose gpio_id matches the record's i2c id.
+	 * (Was `count < table_index`, which let table_index == count slip
+	 * through and read one element past the end of gpio_pin[].) */
+	if (table_index >= count) {
+		bool find_valid = false;
+
+		for (table_index = 0; table_index < count; table_index++) {
+			if (((record->i2c_id & I2C_HW_CAP) == (
+			header->gpio_pin[table_index].gpio_id &
+							I2C_HW_CAP)) &&
+			((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  ==
+			(header->gpio_pin[table_index].gpio_id &
+						I2C_HW_ENGINE_ID_MASK)) &&
+			((record->i2c_id & I2C_HW_LANE_MUX) ==
+			(header->gpio_pin[table_index].gpio_id &
+							I2C_HW_LANE_MUX))) {
+				/* still valid */
+				find_valid = true;
+				break;
+			}
+		}
+		/* If we don't find the entry that we are looking for then
+		 *  we will return BP_Result_BadBiosTable.
+		 */
+		if (find_valid == false)
+			return BP_RESULT_BADBIOSTABLE;
+	}
+
+	/* get the GPIO_I2C_INFO */
+	info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
+	info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
+	info->i2c_engine_id = (record->i2c_id & I2C_HW_ENGINE_ID_MASK) >> 4;
+	info->i2c_slave_address = record->i2c_slave_addr;
+
+	/* TODO: check how to get register offset for en, Y, etc. */
+	info->gpio_info.clk_a_register_index =
+			le16_to_cpu(
+			header->gpio_pin[table_index].data_a_reg_index);
+	info->gpio_info.clk_a_shift =
+			header->gpio_pin[table_index].gpio_bitshift;
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_voltage_ddc_info_v4(
+	uint8_t *i2c_line,
+	uint32_t index,
+	struct atom_common_table_header *header,
+	uint8_t *address)
+{
+	enum bp_result result = BP_RESULT_NORECORD;
+	struct atom_voltage_objects_info_v4_1 *info =
+		(struct atom_voltage_objects_info_v4_1 *) address;
+
+	uint8_t *voltage_current_object =
+		(uint8_t *) (&(info->voltage_object[0]));
+
+	while ((address + le16_to_cpu(header->structuresize)) >
+						voltage_current_object) {
+		struct atom_i2c_voltage_object_v4 *object =
+			(struct atom_i2c_voltage_object_v4 *)
+						voltage_current_object;
+
+		if (object->header.voltage_mode ==
+			ATOM_INIT_VOLTAGE_REGULATOR) {
+			if (object->header.voltage_type == index) {
+				*i2c_line = object->i2c_id ^ 0x90;
+				result = BP_RESULT_OK;
+				break;
+			}
+		}
+
+		voltage_current_object +=
+				le16_to_cpu(object->header.object_size);
+	}
+	return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+	struct dc_bios *dcb,
+	uint32_t i2c_channel_id,
+	struct graphics_object_i2c_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct i2c_id_config_access *config;
+	struct atom_i2c_record record;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	config = (struct i2c_id_config_access *) &i2c_channel_id;
+
+	record.i2c_id = config->bfHW_Capable;
+	record.i2c_id |= config->bfI2C_LineMux;
+	record.i2c_id |= config->bfHW_EngineID;
+
+	return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+	uint32_t index,
+	struct graphics_object_i2c_info *info)
+{
+	uint8_t i2c_line = 0;
+	enum bp_result result = BP_RESULT_NORECORD;
+	uint8_t *voltage_info_address;
+	struct atom_common_table_header *header;
+	struct atom_data_revision revision = {0};
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!DATA_TABLES(voltageobject_info))
+		return result;
+
+	voltage_info_address = bios_get_image(&bp->base,
+			DATA_TABLES(voltageobject_info),
+			sizeof(struct atom_common_table_header));
+
+	header = (struct atom_common_table_header *) voltage_info_address;
+
+	get_atom_data_table_revision(header, &revision);
+
+	switch (revision.major) {
+	case 4:
+		if (revision.minor != 1)
+			break;
+		result = get_voltage_ddc_info_v4(&i2c_line, index, header,
+			voltage_info_address);
+		break;
+	}
+
+	if (result == BP_RESULT_OK)
+		result = bios_parser_get_thermal_ddc_info(dcb,
+			i2c_line, info);
+
+	return result;
+}
+
+static enum bp_result bios_parser_get_hpd_info(
+	struct dc_bios *dcb,
+	struct graphics_object_id id,
+	struct graphics_object_hpd_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct atom_display_object_path_v2 *object;
+	struct atom_hpd_int_record *record = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	record = get_hpd_record(bp, object);
+
+	if (record != NULL) {
+		info->hpd_int_gpio_uid = record->pin_id;
+		info->hpd_active = record->plugin_pin_state;
+		return BP_RESULT_OK;
+	}
+
+	return BP_RESULT_NORECORD;
+}
+
+static struct atom_hpd_int_record *get_hpd_record(
+	struct bios_parser *bp,
+	struct atom_display_object_path_v2 *object)
+{
+	struct atom_common_record_header *header;
+	uint32_t offset;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object */
+		return NULL;
+	}
+
+	offset = le16_to_cpu(object->disp_recordoffset)
+			+ bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(struct atom_common_record_header, offset);
+
+		if (!header)
+			return NULL;
+
+		if (header->record_type == LAST_RECORD_TYPE ||
+			!header->record_size)
+			break;
+
+		if (header->record_type == ATOM_HPD_INT_RECORD_TYPE
+			&& sizeof(struct atom_hpd_int_record) <=
+							header->record_size)
+			return (struct atom_hpd_int_record *) header;
+
+		offset += header->record_size;
+	}
+
+	return NULL;
+}
+
/**
 * bios_parser_get_gpio_pin_info
 * Get GpioPin information for the given GPIO id
 *
 * @param dcb, pointer to the dc_bios interface
 * @param gpio_id, GPIO ID to look up in the GPIO_Pin_LUT table
 * @param info, output GpioPin information structure
 * @return Bios parser result code
 * @note
 *  to get the GPIO PIN INFO, we need:
 *  1. get the GPIO_ID from other object table, see GetHPDInfo()
 *  2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
 *	to get the registerA  offset/mask
 */
static enum bp_result bios_parser_get_gpio_pin_info(
	struct dc_bios *dcb,
	uint32_t gpio_id,
	struct gpio_pin_info *info)
{
	struct bios_parser *bp = BP_FROM_DCB(dcb);
	struct atom_gpio_pin_lut_v2_1 *header;
	uint32_t count = 0;
	uint32_t i = 0;

	if (!DATA_TABLES(gpio_pin_lut))
		return BP_RESULT_BADBIOSTABLE;

	header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
						DATA_TABLES(gpio_pin_lut));
	if (!header)
		return BP_RESULT_BADBIOSTABLE;

	/* NOTE(review): this minimum-size check uses the whole
	 * atom_gpio_pin_lut_v2_1 struct, while get_gpio_i2c_info() only
	 * requires header + one atom_gpio_pin_assignment — confirm which
	 * bound the VBIOS actually guarantees.
	 */
	if (sizeof(struct atom_common_table_header) +
			sizeof(struct atom_gpio_pin_lut_v2_1)
			> le16_to_cpu(header->table_header.structuresize))
		return BP_RESULT_BADBIOSTABLE;

	/* only content revision 1 of the v2.1 LUT is understood */
	if (header->table_header.content_revision != 1)
		return BP_RESULT_UNSUPPORTED;

	/* Temporary hard code gpio pin info */
#if defined(FOR_SIMNOW_BOOT)
	{
		/* simulator boot: overwrite the LUT image in place with a
		 * fixed six-entry table
		 */
		struct  atom_gpio_pin_assignment  gpio_pin[8] = {
				{0x5db5, 0, 0, 1, 0},
				{0x5db5, 8, 8, 2, 0},
				{0x5db5, 0x10, 0x10, 3, 0},
				{0x5db5, 0x18, 0x14, 4, 0},
				{0x5db5, 0x1A, 0x18, 5, 0},
				{0x5db5, 0x1C, 0x1C, 6, 0},
		};

		count = 6;
		memmove(header->gpio_pin, gpio_pin, sizeof(gpio_pin));
	}
#else
	/* number of pin assignments that fit in the table image */
	count = (le16_to_cpu(header->table_header.structuresize)
			- sizeof(struct atom_common_table_header))
				/ sizeof(struct atom_gpio_pin_assignment);
#endif
	for (i = 0; i < count; ++i) {
		if (header->gpio_pin[i].gpio_id != gpio_id)
			continue;

		/* Y/EN/MASK registers sit at fixed offsets around the
		 * data (A) register index
		 */
		info->offset =
			(uint32_t) le16_to_cpu(
					header->gpio_pin[i].data_a_reg_index);
		info->offset_y = info->offset + 2;
		info->offset_en = info->offset + 1;
		info->offset_mask = info->offset - 1;

		/* NOTE(review): mask_y/mask_en/mask_mask are derived by
		 * integer +/- on a single-bit mask rather than by
		 * shifting; verify against the GPIO register layout.
		 */
		info->mask = (uint32_t) (1 <<
			header->gpio_pin[i].gpio_bitshift);
		info->mask_y = info->mask + 2;
		info->mask_en = info->mask + 1;
		info->mask_mask = info->mask - 1;

		return BP_RESULT_OK;
	}

	return BP_RESULT_NORECORD;
}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+
+	struct device_id result_device_id;
+
+	result_device_id.raw_device_tag = device_id;
+
+	switch (device_id) {
+	case ATOM_DISPLAY_LCD1_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_LCD;
+		result_device_id.enum_id = 1;
+		break;
+
+	case ATOM_DISPLAY_DFP1_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 1;
+		break;
+
+	case ATOM_DISPLAY_DFP2_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 2;
+		break;
+
+	case ATOM_DISPLAY_DFP3_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 3;
+		break;
+
+	case ATOM_DISPLAY_DFP4_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 4;
+		break;
+
+	case ATOM_DISPLAY_DFP5_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 5;
+		break;
+
+	case ATOM_DISPLAY_DFP6_SUPPORT:
+		result_device_id.device_type = DEVICE_TYPE_DFP;
+		result_device_id.enum_id = 6;
+		break;
+
+	default:
+		BREAK_TO_DEBUGGER(); /* Invalid device Id */
+		result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+		result_device_id.enum_id = 0;
+	}
+	return result_device_id;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+	struct dc_bios *dcb,
+	struct graphics_object_id connector_object_id,
+	uint32_t device_tag_index,
+	struct connector_device_tag_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct atom_display_object_path_v2 *object;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	/* getBiosObject will return MXM object */
+	object = get_bios_object(bp, connector_object_id);
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object id */
+		return BP_RESULT_BADINPUT;
+	}
+
+	info->acpi_device = 0; /* BIOS no longer provides this */
+	info->dev_id = device_type_from_device_id(object->device_tag);
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v4_1(
+	struct bios_parser *bp,
+	uint32_t id,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	enum bp_result result = BP_RESULT_OK;
+	struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+	if (!ss_info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(dce_info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	disp_cntl_tbl =  GET_IMAGE(struct atom_display_controller_info_v4_1,
+							DATA_TABLES(dce_info));
+	if (!disp_cntl_tbl)
+		return BP_RESULT_BADBIOSTABLE;
+
+	ss_info->type.STEP_AND_DELAY_INFO = false;
+	ss_info->spread_percentage_divider = 1000;
+	/* BIOS no longer uses target clock.  Always enable for now */
+	ss_info->target_clock_range = 0xffffffff;
+
+	switch (id) {
+	case AS_SIGNAL_TYPE_DVI:
+		ss_info->spread_spectrum_percentage =
+				disp_cntl_tbl->dvi_ss_percentage;
+		ss_info->spread_spectrum_range =
+				disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+		if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
+	case AS_SIGNAL_TYPE_HDMI:
+		ss_info->spread_spectrum_percentage =
+				disp_cntl_tbl->hdmi_ss_percentage;
+		ss_info->spread_spectrum_range =
+				disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+		if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
+	/* TODO LVDS not support anymore? */
+	case AS_SIGNAL_TYPE_DISPLAY_PORT:
+		ss_info->spread_spectrum_percentage =
+				disp_cntl_tbl->dp_ss_percentage;
+		ss_info->spread_spectrum_range =
+				disp_cntl_tbl->dp_ss_rate_10hz * 10;
+		if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
+	case AS_SIGNAL_TYPE_GPU_PLL:
+		/* atom_firmware: DAL only get data from dce_info table.
+		 * if data within smu_info is needed for DAL, VBIOS should
+		 * copy it into dce_info
+		 */
+		result = BP_RESULT_UNSUPPORTED;
+		break;
+	default:
+		result = BP_RESULT_UNSUPPORTED;
+	}
+
+	return result;
+}
+
+static enum bp_result get_ss_info_v4_2(
+	struct bios_parser *bp,
+	uint32_t id,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	enum bp_result result = BP_RESULT_OK;
+	struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+	struct atom_smu_info_v3_1 *smu_info = NULL;
+
+	if (!ss_info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(dce_info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (!DATA_TABLES(smu_info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	disp_cntl_tbl =  GET_IMAGE(struct atom_display_controller_info_v4_2,
+							DATA_TABLES(dce_info));
+	if (!disp_cntl_tbl)
+		return BP_RESULT_BADBIOSTABLE;
+
+	smu_info =  GET_IMAGE(struct atom_smu_info_v3_1, DATA_TABLES(smu_info));
+	if (!smu_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	ss_info->type.STEP_AND_DELAY_INFO = false;
+	ss_info->spread_percentage_divider = 1000;
+	/* BIOS no longer uses target clock.  Always enable for now */
+	ss_info->target_clock_range = 0xffffffff;
+
+	switch (id) {
+	case AS_SIGNAL_TYPE_DVI:
+		ss_info->spread_spectrum_percentage =
+				disp_cntl_tbl->dvi_ss_percentage;
+		ss_info->spread_spectrum_range =
+				disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+		if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
+	case AS_SIGNAL_TYPE_HDMI:
+		ss_info->spread_spectrum_percentage =
+				disp_cntl_tbl->hdmi_ss_percentage;
+		ss_info->spread_spectrum_range =
+				disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+		if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
+	/* TODO LVDS not support anymore? */
+	case AS_SIGNAL_TYPE_DISPLAY_PORT:
+		ss_info->spread_spectrum_percentage =
+				smu_info->gpuclk_ss_percentage;
+		ss_info->spread_spectrum_range =
+				smu_info->gpuclk_ss_rate_10hz * 10;
+		if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
+	case AS_SIGNAL_TYPE_GPU_PLL:
+		/* atom_firmware: DAL only get data from dce_info table.
+		 * if data within smu_info is needed for DAL, VBIOS should
+		 * copy it into dce_info
+		 */
+		result = BP_RESULT_UNSUPPORTED;
+		break;
+	default:
+		result = BP_RESULT_UNSUPPORTED;
+	}
+
+	return result;
+}
+
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info(ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info
+ * ver 3.1,
+ * there is only one entry for each signal /ss id.  However, there is
+ * no planning of supporting multiple spread Sprectum entry for EverGreen
+ * @param [in] this
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, sprectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+	struct dc_bios *dcb,
+	enum as_signal_type signal,
+	uint32_t index,
+	struct spread_spectrum_info *ss_info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	struct atom_common_table_header *header;
+	struct atom_data_revision tbl_revision;
+
+	if (!ss_info) /* check for bad input */
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(dce_info))
+		return BP_RESULT_UNSUPPORTED;
+
+	header = GET_IMAGE(struct atom_common_table_header,
+						DATA_TABLES(dce_info));
+	get_atom_data_table_revision(header, &tbl_revision);
+
+	switch (tbl_revision.major) {
+	case 4:
+		switch (tbl_revision.minor) {
+		case 1:
+			return get_ss_info_v4_1(bp, signal, index, ss_info);
+		case 2:
+			return get_ss_info_v4_2(bp, signal, index, ss_info);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	/* there can not be more then one entry for SS Info table */
+	return result;
+}
+
+static enum bp_result get_embedded_panel_info_v2_1(
+	struct bios_parser *bp,
+	struct embedded_panel_info *info)
+{
+	struct lcd_info_v2_1 *lvds;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(lcd_info))
+		return BP_RESULT_UNSUPPORTED;
+
+	lvds = GET_IMAGE(struct lcd_info_v2_1, DATA_TABLES(lcd_info));
+
+	if (!lvds)
+		return BP_RESULT_BADBIOSTABLE;
+
+	/* TODO: previous vv1_3, should v2_1 */
+	if (!((lvds->table_header.format_revision == 2)
+			&& (lvds->table_header.content_revision >= 1)))
+		return BP_RESULT_UNSUPPORTED;
+
+	memset(info, 0, sizeof(struct embedded_panel_info));
+
+	/* We need to convert from 10KHz units into KHz units */
+	info->lcd_timing.pixel_clk =
+			le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
+	/* usHActive does not include borders, according to VBIOS team */
+	info->lcd_timing.horizontal_addressable =
+			le16_to_cpu(lvds->lcd_timing.h_active);
+	/* usHBlanking_Time includes borders, so we should really be
+	 * subtractingborders duing this translation, but LVDS generally
+	 * doesn't have borders, so we should be okay leaving this as is for
+	 * now.  May need to revisit if we ever have LVDS with borders
+	 */
+	info->lcd_timing.horizontal_blanking_time =
+		le16_to_cpu(lvds->lcd_timing.h_blanking_time);
+	/* usVActive does not include borders, according to VBIOS team*/
+	info->lcd_timing.vertical_addressable =
+		le16_to_cpu(lvds->lcd_timing.v_active);
+	/* usVBlanking_Time includes borders, so we should really be
+	 * subtracting borders duing this translation, but LVDS generally
+	 * doesn't have borders, so we should be okay leaving this as is for
+	 * now. May need to revisit if we ever have LVDS with borders
+	 */
+	info->lcd_timing.vertical_blanking_time =
+		le16_to_cpu(lvds->lcd_timing.v_blanking_time);
+	info->lcd_timing.horizontal_sync_offset =
+		le16_to_cpu(lvds->lcd_timing.h_sync_offset);
+	info->lcd_timing.horizontal_sync_width =
+		le16_to_cpu(lvds->lcd_timing.h_sync_width);
+	info->lcd_timing.vertical_sync_offset =
+		le16_to_cpu(lvds->lcd_timing.v_sync_offset);
+	info->lcd_timing.vertical_sync_width =
+		le16_to_cpu(lvds->lcd_timing.v_syncwidth);
+	info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border;
+	info->lcd_timing.vertical_border = lvds->lcd_timing.v_border;
+
+	/* not provided by VBIOS */
+	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
+
+	info->lcd_timing.misc_info.H_SYNC_POLARITY =
+		~(uint32_t)
+		(lvds->lcd_timing.miscinfo & ATOM_HSYNC_POLARITY);
+	info->lcd_timing.misc_info.V_SYNC_POLARITY =
+		~(uint32_t)
+		(lvds->lcd_timing.miscinfo & ATOM_VSYNC_POLARITY);
+
+	/* not provided by VBIOS */
+	info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
+
+	info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+		!!(lvds->lcd_timing.miscinfo & ATOM_H_REPLICATIONBY2);
+	info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+		!!(lvds->lcd_timing.miscinfo & ATOM_V_REPLICATIONBY2);
+	info->lcd_timing.misc_info.COMPOSITE_SYNC =
+		!!(lvds->lcd_timing.miscinfo & ATOM_COMPOSITESYNC);
+	info->lcd_timing.misc_info.INTERLACE =
+		!!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);
+
+	/* not provided by VBIOS*/
+	info->lcd_timing.misc_info.DOUBLE_CLOCK = 0;
+	/* not provided by VBIOS*/
+	info->ss_id = 0;
+
+	info->realtek_eDPToLVDS =
+			!!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+	struct dc_bios *dcb,
+	struct embedded_panel_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct atom_common_table_header *header;
+	struct atom_data_revision tbl_revision;
+
+	if (!DATA_TABLES(lcd_info))
+		return BP_RESULT_FAILURE;
+
+	header = GET_IMAGE(struct atom_common_table_header,
+					DATA_TABLES(lcd_info));
+
+	if (!header)
+		return BP_RESULT_BADBIOSTABLE;
+
+	get_atom_data_table_revision(header, &tbl_revision);
+
+
+	switch (tbl_revision.major) {
+	case 2:
+		switch (tbl_revision.minor) {
+		case 1:
+			return get_embedded_panel_info_v2_1(bp, info);
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+	enum dal_device_type device_type = device_id.device_type;
+	uint32_t enum_id = device_id.enum_id;
+
+	switch (device_type) {
+	case DEVICE_TYPE_LCD:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DISPLAY_LCD1_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	case DEVICE_TYPE_DFP:
+		switch (enum_id) {
+		case 1:
+			return ATOM_DISPLAY_DFP1_SUPPORT;
+		case 2:
+			return ATOM_DISPLAY_DFP2_SUPPORT;
+		case 3:
+			return ATOM_DISPLAY_DFP3_SUPPORT;
+		case 4:
+			return ATOM_DISPLAY_DFP4_SUPPORT;
+		case 5:
+			return ATOM_DISPLAY_DFP5_SUPPORT;
+		case 6:
+			return ATOM_DISPLAY_DFP6_SUPPORT;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	};
+
+	/* Unidentified device ID, return empty support mask. */
+	return 0;
+}
+
+static bool bios_parser_is_device_id_supported(
+	struct dc_bios *dcb,
+	struct device_id id)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	uint32_t mask = get_support_mask_for_device_id(id);
+
+	return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) &
+								mask) != 0;
+}
+
/* Post-initialization hook for the OPM module; intentionally empty. */
static void bios_parser_post_init(
	struct dc_bios *dcb)
{
	/* TODO for OPM module. Need implement later */
}
+
/* Number of spread-spectrum entries for @signal; always 1 with
 * atomfirmware-based tables (one entry per signal type).
 */
static uint32_t bios_parser_get_ss_entry_number(
	struct dc_bios *dcb,
	enum as_signal_type signal)
{
	/* TODO: DAL2 atomfirmware implementation does not need this.
	 * why DAL3 need this?
	 */
	return 1;
}
+
+static enum bp_result bios_parser_transmitter_control(
+	struct dc_bios *dcb,
+	struct bp_transmitter_control *cntl)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.transmitter_control)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+	struct dc_bios *dcb,
+	struct bp_encoder_control *cntl)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.dig_encoder_control)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+	struct dc_bios *dcb,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_pixel_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+	struct dc_bios *dcb,
+	struct bp_set_dce_clock_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_dce_clock)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static unsigned int bios_parser_get_smu_clock_info(
+	struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.get_smu_clock_info)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.get_smu_clock_info(bp);
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+	struct dc_bios *dcb,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.set_crtc_timing)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_crtc(
+	struct dc_bios *dcb,
+	enum controller_id id,
+	bool enable)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_crtc)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+	struct dc_bios *dcb,
+	struct bp_crtc_source_select *bp_params)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.select_crtc_source)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+	struct dc_bios *dcb,
+	enum controller_id controller_id,
+	enum bp_pipe_control_action action)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+	if (!bp->cmd_tbl.enable_disp_power_gating)
+		return BP_RESULT_FAILURE;
+
+	return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+		action);
+}
+
/* Pass-through to the shared helper that reads the VBIOS scratch
 * registers and reports whether an accelerated (driver) mode is set.
 */
static bool bios_parser_is_accelerated_mode(
	struct dc_bios *dcb)
{
	return bios_is_accelerated_mode(dcb);
}
+
+
/**
 * bios_parser_set_scratch_critical_state
 *
 * @brief
 *  update critical state bit in VBIOS scratch register
 *
 * @param dcb, pointer to the dc_bios interface
 * @param state, set (true) or reset (false) the critical-state bit
 *
 * Pass-through to the shared scratch-register helper.
 */
static void bios_parser_set_scratch_critical_state(
	struct dc_bios *dcb,
	bool state)
{
	bios_set_scratch_critical_state(dcb, state);
}
+
+static enum bp_result bios_parser_get_firmware_info(
+	struct dc_bios *dcb,
+	struct dc_firmware_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
+	struct atom_common_table_header *header;
+
+	struct atom_data_revision revision;
+
+	if (info && DATA_TABLES(firmwareinfo)) {
+		header = GET_IMAGE(struct atom_common_table_header,
+				DATA_TABLES(firmwareinfo));
+		get_atom_data_table_revision(header, &revision);
+		switch (revision.major) {
+		case 3:
+			switch (revision.minor) {
+			case 1:
+				result = get_firmware_info_v3_1(bp, info);
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return result;
+}
+
+static enum bp_result get_firmware_info_v3_1(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info)
+{
+	struct atom_firmware_info_v3_1 *firmware_info;
+	struct atom_display_controller_info_v4_1 *dce_info = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
+			DATA_TABLES(firmwareinfo));
+
+	dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+			DATA_TABLES(dce_info));
+
+	if (!firmware_info || !dce_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	/* Pixel clock pll information. */
+	 /* We need to convert from 10KHz units into KHz units */
+	info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+	info->default_engine_clk = firmware_info->bootup_sclk_in10khz * 10;
+
+	 /* 27MHz for Vega10: */
+	info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+
+	/* Hardcode frequency if BIOS gives no DCE Ref Clk */
+	if (info->pll_info.crystal_frequency == 0)
+		info->pll_info.crystal_frequency = 27000;
+	/*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
+	info->dp_phy_ref_clk     = dce_info->dpphy_refclk_10khz * 10;
+	info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+	/* Get GPU PLL VCO Clock */
+
+	if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+		/* VBIOS gives in 10KHz */
+		info->smu_gpu_pll_output_freq =
+				bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+	}
+
+	return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+	struct dc_bios *dcb,
+	struct graphics_object_id object_id,
+	struct bp_encoder_cap_info *info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct atom_display_object_path_v2 *object;
+	struct atom_encoder_caps_record *record = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	object = get_bios_object(bp, object_id);
+
+	if (!object)
+		return BP_RESULT_BADINPUT;
+
+	record = get_encoder_cap_record(bp, object);
+	if (!record)
+		return BP_RESULT_NORECORD;
+
+	info->DP_HBR2_CAP = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_HBR2) ? 1 : 0;
+	info->DP_HBR2_EN = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_HBR2_EN) ? 1 : 0;
+	info->DP_HBR3_EN = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
+	info->HDMI_6GB_EN = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+
+	return BP_RESULT_OK;
+}
+
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+	struct bios_parser *bp,
+	struct atom_display_object_path_v2 *object)
+{
+	struct atom_common_record_header *header;
+	uint32_t offset;
+
+	if (!object) {
+		BREAK_TO_DEBUGGER(); /* Invalid object */
+		return NULL;
+	}
+
+	offset = object->encoder_recordoffset + bp->object_info_tbl_offset;
+
+	for (;;) {
+		header = GET_IMAGE(struct atom_common_record_header, offset);
+
+		if (!header)
+			return NULL;
+
+		offset += header->record_size;
+
+		if (header->record_type == LAST_RECORD_TYPE ||
+				!header->record_size)
+			break;
+
+		if (header->record_type != ATOM_ENCODER_CAP_RECORD_TYPE)
+			continue;
+
+		if (sizeof(struct atom_encoder_caps_record) <=
+							header->record_size)
+			return (struct atom_encoder_caps_record *)header;
+	}
+
+	return NULL;
+}
+
+/*
+ * get_integrated_info_v11
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v11(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	struct atom_integrated_system_info_v1_11 *info_v11;
+	uint32_t i;
+
+	info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+					DATA_TABLES(integratedsysteminfo));
+
+	if (info_v11 == NULL)
+		return BP_RESULT_BADBIOSTABLE;
+
+	info->gpu_cap_info =
+	le32_to_cpu(info_v11->gpucapinfo);
+	/*
+	* system_config: Bit[0] = 0 : PCIE power gating disabled
+	*                       = 1 : PCIE power gating enabled
+	*                Bit[1] = 0 : DDR-PLL shut down disabled
+	*                       = 1 : DDR-PLL shut down enabled
+	*                Bit[2] = 0 : DDR-PLL power down disabled
+	*                       = 1 : DDR-PLL power down enabled
+	*/
+	info->system_config = le32_to_cpu(info_v11->system_config);
+	info->cpu_cap_info = le32_to_cpu(info_v11->cpucapinfo);
+	info->memory_type = info_v11->memorytype;
+	info->ma_channel_number = info_v11->umachannelnumber;
+	info->lvds_ss_percentage =
+	le16_to_cpu(info_v11->lvds_ss_percentage);
+	info->lvds_sspread_rate_in_10hz =
+	le16_to_cpu(info_v11->lvds_ss_rate_10hz);
+	info->hdmi_ss_percentage =
+	le16_to_cpu(info_v11->hdmi_ss_percentage);
+	info->hdmi_sspread_rate_in_10hz =
+	le16_to_cpu(info_v11->hdmi_ss_rate_10hz);
+	info->dvi_ss_percentage =
+	le16_to_cpu(info_v11->dvi_ss_percentage);
+	info->dvi_sspread_rate_in_10_hz =
+	le16_to_cpu(info_v11->dvi_ss_rate_10hz);
+	info->lvds_misc = info_v11->lvds_misc;
+	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+		info->ext_disp_conn_info.gu_id[i] =
+				info_v11->extdispconninfo.guid[i];
+	}
+
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+		info->ext_disp_conn_info.path[i].device_connector_id =
+		object_id_from_bios_object_id(
+		le16_to_cpu(info_v11->extdispconninfo.path[i].connectorobjid));
+
+		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+		object_id_from_bios_object_id(
+			le16_to_cpu(
+			info_v11->extdispconninfo.path[i].ext_encoder_objid));
+
+		info->ext_disp_conn_info.path[i].device_tag =
+			le16_to_cpu(
+				info_v11->extdispconninfo.path[i].device_tag);
+		info->ext_disp_conn_info.path[i].device_acpi_enum =
+		le16_to_cpu(
+			info_v11->extdispconninfo.path[i].device_acpi_enum);
+		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+			info_v11->extdispconninfo.path[i].auxddclut_index;
+		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+			info_v11->extdispconninfo.path[i].hpdlut_index;
+		info->ext_disp_conn_info.path[i].channel_mapping.raw =
+			info_v11->extdispconninfo.path[i].channelmapping;
+		info->ext_disp_conn_info.path[i].caps =
+				le16_to_cpu(info_v11->extdispconninfo.path[i].caps);
+	}
+	info->ext_disp_conn_info.checksum =
+	info_v11->extdispconninfo.checksum;
+
+	info->dp0_ext_hdmi_slv_addr = info_v11->dp0_retimer_set.HdmiSlvAddr;
+	info->dp0_ext_hdmi_reg_num = info_v11->dp0_retimer_set.HdmiRegNum;
+	for (i = 0; i < info->dp0_ext_hdmi_reg_num; i++) {
+		info->dp0_ext_hdmi_reg_settings[i].i2c_reg_index =
+				info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+		info->dp0_ext_hdmi_reg_settings[i].i2c_reg_val =
+				info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+	}
+	info->dp0_ext_hdmi_6g_reg_num = info_v11->dp0_retimer_set.Hdmi6GRegNum;
+	for (i = 0; i < info->dp0_ext_hdmi_6g_reg_num; i++) {
+		info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+				info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+		info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+				info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+	}
+
+	info->dp1_ext_hdmi_slv_addr = info_v11->dp1_retimer_set.HdmiSlvAddr;
+	info->dp1_ext_hdmi_reg_num = info_v11->dp1_retimer_set.HdmiRegNum;
+	for (i = 0; i < info->dp1_ext_hdmi_reg_num; i++) {
+		info->dp1_ext_hdmi_reg_settings[i].i2c_reg_index =
+				info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+		info->dp1_ext_hdmi_reg_settings[i].i2c_reg_val =
+				info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+	}
+	info->dp1_ext_hdmi_6g_reg_num = info_v11->dp1_retimer_set.Hdmi6GRegNum;
+	for (i = 0; i < info->dp1_ext_hdmi_6g_reg_num; i++) {
+		info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+				info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+		info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+				info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+	}
+
+	info->dp2_ext_hdmi_slv_addr = info_v11->dp2_retimer_set.HdmiSlvAddr;
+	info->dp2_ext_hdmi_reg_num = info_v11->dp2_retimer_set.HdmiRegNum;
+	for (i = 0; i < info->dp2_ext_hdmi_reg_num; i++) {
+		info->dp2_ext_hdmi_reg_settings[i].i2c_reg_index =
+				info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+		info->dp2_ext_hdmi_reg_settings[i].i2c_reg_val =
+				info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+	}
+	info->dp2_ext_hdmi_6g_reg_num = info_v11->dp2_retimer_set.Hdmi6GRegNum;
+	for (i = 0; i < info->dp2_ext_hdmi_6g_reg_num; i++) {
+		info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+				info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+		info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+				info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+	}
+
+	info->dp3_ext_hdmi_slv_addr = info_v11->dp3_retimer_set.HdmiSlvAddr;
+	info->dp3_ext_hdmi_reg_num = info_v11->dp3_retimer_set.HdmiRegNum;
+	for (i = 0; i < info->dp3_ext_hdmi_reg_num; i++) {
+		info->dp3_ext_hdmi_reg_settings[i].i2c_reg_index =
+				info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+		info->dp3_ext_hdmi_reg_settings[i].i2c_reg_val =
+				info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+	}
+	info->dp3_ext_hdmi_6g_reg_num = info_v11->dp3_retimer_set.Hdmi6GRegNum;
+	for (i = 0; i < info->dp3_ext_hdmi_6g_reg_num; i++) {
+		info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+				info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+		info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+				info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+	}
+
+
+	/** TODO - review **/
+	#if 0
+	info->boot_up_engine_clock = le32_to_cpu(info_v11->ulBootUpEngineClock)
+									* 10;
+	info->dentist_vco_freq = le32_to_cpu(info_v11->ulDentistVCOFreq) * 10;
+	info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->disp_clk_voltage[i].max_supported_clk =
+		le32_to_cpu(info_v11->sDISPCLK_Voltage[i].
+			ulMaximumSupportedCLK) * 10;
+		info->disp_clk_voltage[i].voltage_index =
+		le32_to_cpu(info_v11->sDISPCLK_Voltage[i].ulVoltageIndex);
+	}
+
+	info->boot_up_req_display_vector =
+			le32_to_cpu(info_v11->ulBootUpReqDisplayVector);
+	info->boot_up_nb_voltage =
+			le16_to_cpu(info_v11->usBootUpNBVoltage);
+	info->ext_disp_conn_info_offset =
+			le16_to_cpu(info_v11->usExtDispConnInfoOffset);
+	info->gmc_restore_reset_time =
+			le32_to_cpu(info_v11->ulGMCRestoreResetTime);
+	info->minimum_n_clk =
+			le32_to_cpu(info_v11->ulNbpStateNClkFreq[0]);
+	for (i = 1; i < 4; ++i)
+		info->minimum_n_clk =
+				info->minimum_n_clk <
+				le32_to_cpu(info_v11->ulNbpStateNClkFreq[i]) ?
+				info->minimum_n_clk : le32_to_cpu(
+					info_v11->ulNbpStateNClkFreq[i]);
+
+	info->idle_n_clk = le32_to_cpu(info_v11->ulIdleNClk);
+	info->ddr_dll_power_up_time =
+	    le32_to_cpu(info_v11->ulDDR_DLL_PowerUpTime);
+	info->ddr_pll_power_up_time =
+		le32_to_cpu(info_v11->ulDDR_PLL_PowerUpTime);
+	info->pcie_clk_ss_type = le16_to_cpu(info_v11->usPCIEClkSSType);
+	info->max_lvds_pclk_freq_in_single_link =
+		le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+	info->max_lvds_pclk_freq_in_single_link =
+		le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+	info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+		info_v11->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+	info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+		info_v11->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+	info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+		info_v11->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+	info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+		info_v11->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+	info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+		info_v11->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+	info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+		info_v11->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+	info->lvds_off_to_on_delay_in_4ms =
+		info_v11->ucLVDSOffToOnDelay_in4Ms;
+	info->lvds_bit_depth_control_val =
+		le32_to_cpu(info_v11->ulLCDBitDepthControlVal);
+
+	for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+		/* Convert [10KHz] into [KHz] */
+		info->avail_s_clk[i].supported_s_clk =
+			le32_to_cpu(info_v11->sAvail_SCLK[i].ulSupportedSCLK)
+									* 10;
+		info->avail_s_clk[i].voltage_index =
+			le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageIndex);
+		info->avail_s_clk[i].voltage_id =
+			le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageID);
+	}
+	#endif /* TODO*/
+
+	return BP_RESULT_OK;
+}
+
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ *                  BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+	struct bios_parser *bp,
+	struct integrated_info *info)
+{
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+	struct atom_common_table_header *header;
+	struct atom_data_revision revision;
+
+	struct clock_voltage_caps temp = {0, 0};
+	uint32_t i;
+	uint32_t j;
+
+	if (info && DATA_TABLES(integratedsysteminfo)) {
+		header = GET_IMAGE(struct atom_common_table_header,
+					DATA_TABLES(integratedsysteminfo));
+
+		get_atom_data_table_revision(header, &revision);
+
+		/* Don't need to check major revision as they are all 1 */
+		switch (revision.minor) {
+		case 11:
+			/* Only the v1.11 integratedsysteminfo layout is
+			 * supported; any other minor revision returns
+			 * BP_RESULT_BADBIOSTABLE below. */
+			result = get_integrated_info_v11(bp, info);
+			break;
+		default:
+			return result;
+		}
+	}
+
+	if (result != BP_RESULT_OK)
+		return result;
+
+	/* Sort voltage table from low to high*/
+	/* Insertion sort on max_supported_clk; the table is tiny so O(n^2)
+	 * is fine.  NOTE(review): the v11 parser currently leaves
+	 * disp_clk_voltage zero-filled (population is under "#if 0"), so
+	 * this sort is effectively a no-op until that TODO is resolved.
+	 */
+	for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+		for (j = i; j > 0; --j) {
+			if (info->disp_clk_voltage[j].max_supported_clk <
+				info->disp_clk_voltage[j-1].max_supported_clk
+				) {
+				/* swap j and j - 1*/
+				temp = info->disp_clk_voltage[j-1];
+				info->disp_clk_voltage[j-1] =
+					info->disp_clk_voltage[j];
+				info->disp_clk_voltage[j] = temp;
+			}
+		}
+	}
+
+	return result;
+}
+
+/*
+ * Allocate a zeroed integrated_info and fill it from the BIOS tables.
+ * Returns the populated structure (caller owns it and must kfree() it),
+ * or NULL if allocation fails or no supported table is present.
+ */
+static struct integrated_info *bios_parser_create_integrated_info(
+	struct dc_bios *dcb)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	struct integrated_info *info = NULL;
+
+	info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+	if (info == NULL) {
+		/* Allocation failure at init time is unexpected enough to
+		 * assert on, but we still fail gracefully with NULL. */
+		ASSERT_CRITICAL(0);
+		return NULL;
+	}
+
+	if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+		return info;
+
+	/* Parsing failed (e.g. unsupported table revision): discard. */
+	kfree(info);
+
+	return NULL;
+}
+
+/*
+ * Function table exported through dc_bios.funcs: maps the generic
+ * dc_bios interface onto this atomfirmware parser's implementations.
+ * Entries left commented out are not implemented by this parser.
+ */
+static const struct dc_vbios_funcs vbios_funcs = {
+	.get_connectors_number = bios_parser_get_connectors_number,
+
+	.get_encoder_id = bios_parser_get_encoder_id,
+
+	.get_connector_id = bios_parser_get_connector_id,
+
+	.get_dst_number = bios_parser_get_dst_number,
+
+	.get_src_obj = bios_parser_get_src_obj,
+
+	.get_dst_obj = bios_parser_get_dst_obj,
+
+	.get_i2c_info = bios_parser_get_i2c_info,
+
+	.get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+	.get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+	.get_hpd_info = bios_parser_get_hpd_info,
+
+	.get_device_tag = bios_parser_get_device_tag,
+
+	.get_firmware_info = bios_parser_get_firmware_info,
+
+	.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+	.get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+	.get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+	.get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+	.get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+	.is_device_id_supported = bios_parser_is_device_id_supported,
+
+
+
+	.is_accelerated_mode = bios_parser_is_accelerated_mode,
+
+	.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+
+/*	 COMMANDS */
+	.encoder_control = bios_parser_encoder_control,
+
+	.transmitter_control = bios_parser_transmitter_control,
+
+	.enable_crtc = bios_parser_enable_crtc,
+
+	.set_pixel_clock = bios_parser_set_pixel_clock,
+
+	.set_dce_clock = bios_parser_set_dce_clock,
+
+	.program_crtc_timing = bios_parser_program_crtc_timing,
+
+	/* .blank_crtc = bios_parser_blank_crtc, */
+
+	.crtc_source_select = bios_parser_crtc_source_select,
+
+	/* .external_encoder_control = bios_parser_external_encoder_control, */
+
+	.enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+	.post_init = bios_parser_post_init,
+
+	.bios_parser_destroy = firmware_parser_destroy,
+
+	.get_smu_clock_info = bios_parser_get_smu_clock_info,
+};
+
+/*
+ * bios_parser_construct
+ *
+ * Initialize a bios_parser from raw VBIOS data: validate the ATOM ROM
+ * header, locate the master data table and the v1.4 display object info
+ * table, then set up the command tables and integrated (APU) info.
+ * Returns false on any validation failure; on failure the caller is
+ * expected to free the parser (see firmware_parser_create()).
+ */
+static bool bios_parser_construct(
+	struct bios_parser *bp,
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	uint16_t *rom_header_offset = NULL;
+	struct atom_rom_header_v2_2 *rom_header = NULL;
+	struct display_object_info_table_v1_4 *object_info_tbl;
+	struct atom_data_revision tbl_rev = {0};
+
+	if (!init)
+		return false;
+
+	if (!init->bios)
+		return false;
+
+	bp->base.funcs = &vbios_funcs;
+	bp->base.bios = init->bios;
+	/* Image size is stored as a byte count of BIOS_IMAGE_SIZE_UNITs. */
+	bp->base.bios_size = bp->base.bios[OFFSET_TO_ATOM_ROM_IMAGE_SIZE] * BIOS_IMAGE_SIZE_UNIT;
+
+	bp->base.ctx = init->ctx;
+
+	bp->base.bios_local_image = NULL;
+
+	rom_header_offset =
+			GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+	if (!rom_header_offset)
+		return false;
+
+	rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset);
+
+	if (!rom_header)
+		return false;
+
+	get_atom_data_table_revision(&rom_header->table_header, &tbl_rev);
+	/* NOTE(review): this accepts only major >= 2 AND minor >= 2, so a
+	 * hypothetical 3.0/3.1 header (major > 2, minor < 2) would be
+	 * rejected -- confirm the intended revision policy. */
+	if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2))
+		return false;
+
+	bp->master_data_tbl =
+		GET_IMAGE(struct atom_master_data_table_v2_1,
+				rom_header->masterdatatable_offset);
+
+	if (!bp->master_data_tbl)
+		return false;
+
+	bp->object_info_tbl_offset = DATA_TABLES(displayobjectinfo);
+
+	if (!bp->object_info_tbl_offset)
+		return false;
+
+	object_info_tbl =
+			GET_IMAGE(struct display_object_info_table_v1_4,
+						bp->object_info_tbl_offset);
+
+	if (!object_info_tbl)
+		return false;
+
+	get_atom_data_table_revision(&object_info_tbl->table_header,
+		&bp->object_info_tbl.revision);
+
+	/* Only object info table revision 1.4+ is supported here. */
+	if (bp->object_info_tbl.revision.major == 1
+		&& bp->object_info_tbl.revision.minor >= 4) {
+		struct display_object_info_table_v1_4 *tbl_v1_4;
+
+		tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
+			bp->object_info_tbl_offset);
+		if (!tbl_v1_4)
+			return false;
+
+		bp->object_info_tbl.v1_4 = tbl_v1_4;
+	} else
+		return false;
+
+	dal_firmware_parser_init_cmd_tbl(bp);
+	dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
+
+	/* May legitimately be NULL (e.g. no supported v1.11
+	 * integratedsysteminfo table on dGPU boards); consumers of
+	 * base.integrated_info must tolerate NULL. */
+	bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+	return true;
+}
+
+/*
+ * firmware_parser_create
+ *
+ * Allocate and construct a bios_parser for the new atomfirmware table
+ * layout.  Returns the embedded dc_bios handle on success, or NULL when
+ * allocation or construction fails (the caller then falls back to the
+ * legacy parser -- see dal_bios_parser_create()).
+ */
+struct dc_bios *firmware_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	struct bios_parser *bp = NULL;
+
+	bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+	if (!bp)
+		return NULL;
+
+	if (bios_parser_construct(bp, init, dce_version))
+		return &bp->base;
+
+	kfree(bp);
+	return NULL;
+}
+
+

+ 33 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER2_H__
+#define __DAL_BIOS_PARSER2_H__
+
+struct dc_bios *firmware_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version);
+
+#endif

+ 288 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c

@@ -0,0 +1,288 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "bios_parser_common.h"
+#include "include/grph_object_ctrl_defs.h"
+
+/*
+ * Decode the object-type bit-field of a packed BIOS object id into the
+ * driver's enum object_type.  Unrecognized values map to
+ * OBJECT_TYPE_UNKNOWN.
+ */
+static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
+				>> OBJECT_TYPE_SHIFT;
+	enum object_type object_type;
+
+	switch (bios_object_type) {
+	case GRAPH_OBJECT_TYPE_GPU:
+		object_type = OBJECT_TYPE_GPU;
+		break;
+	case GRAPH_OBJECT_TYPE_ENCODER:
+		object_type = OBJECT_TYPE_ENCODER;
+		break;
+	case GRAPH_OBJECT_TYPE_CONNECTOR:
+		object_type = OBJECT_TYPE_CONNECTOR;
+		break;
+	case GRAPH_OBJECT_TYPE_ROUTER:
+		object_type = OBJECT_TYPE_ROUTER;
+		break;
+	case GRAPH_OBJECT_TYPE_GENERIC:
+		object_type = OBJECT_TYPE_GENERIC;
+		break;
+	default:
+		object_type = OBJECT_TYPE_UNKNOWN;
+		break;
+	}
+
+	return object_type;
+}
+
+/*
+ * Decode the enum-id bit-field of a packed BIOS object id (the instance
+ * number distinguishing multiple objects of the same type) into the
+ * driver's enum object_enum_id.  Unrecognized values map to
+ * ENUM_ID_UNKNOWN.
+ */
+static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_enum_id =
+			(bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	enum object_enum_id id;
+
+	switch (bios_enum_id) {
+	case GRAPH_OBJECT_ENUM_ID1:
+		id = ENUM_ID_1;
+		break;
+	case GRAPH_OBJECT_ENUM_ID2:
+		id = ENUM_ID_2;
+		break;
+	case GRAPH_OBJECT_ENUM_ID3:
+		id = ENUM_ID_3;
+		break;
+	case GRAPH_OBJECT_ENUM_ID4:
+		id = ENUM_ID_4;
+		break;
+	case GRAPH_OBJECT_ENUM_ID5:
+		id = ENUM_ID_5;
+		break;
+	case GRAPH_OBJECT_ENUM_ID6:
+		id = ENUM_ID_6;
+		break;
+	case GRAPH_OBJECT_ENUM_ID7:
+		id = ENUM_ID_7;
+		break;
+	default:
+		id = ENUM_ID_UNKNOWN;
+		break;
+	}
+
+	return id;
+}
+
+/*
+ * Extract the raw object-id bit-field from a packed BIOS object id.
+ * Despite the name, this same field carries encoder/connector/generic
+ * ids too: the *_id_from_bios_object_id() helpers below reuse this
+ * before mapping to their own enums.
+ */
+static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+}
+
+/*
+ * Map the object-id field of a BIOS encoder object to the driver's
+ * enum encoder_id.  Unknown encoder ids assert (they indicate a BIOS
+ * table this code does not yet understand) and map to
+ * ENCODER_ID_UNKNOWN.
+ */
+static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
+	enum encoder_id id;
+
+	switch (bios_encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		id = ENCODER_ID_INTERNAL_LVDS;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+		id = ENCODER_ID_INTERNAL_TMDS1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
+		id = ENCODER_ID_INTERNAL_TMDS2;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		id = ENCODER_ID_INTERNAL_DAC1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+		id = ENCODER_ID_INTERNAL_DAC2;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		id = ENCODER_ID_INTERNAL_LVTM1;
+		break;
+	case ENCODER_OBJECT_ID_HDMI_INTERNAL:
+		id = ENCODER_ID_INTERNAL_HDMI;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
+		break;
+	case ENCODER_OBJECT_ID_MVPU_FPGA:
+		id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+		id = ENCODER_ID_INTERNAL_DDI;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		id = ENCODER_ID_INTERNAL_UNIPHY;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		id = ENCODER_ID_INTERNAL_UNIPHY1;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		id = ENCODER_ID_INTERNAL_UNIPHY2;
+		break;
+	case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
+		id = ENCODER_ID_EXTERNAL_NUTMEG;
+		break;
+	case ENCODER_OBJECT_ID_TRAVIS:
+		id = ENCODER_ID_EXTERNAL_TRAVIS;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		id = ENCODER_ID_INTERNAL_UNIPHY3;
+		break;
+	default:
+		id = ENCODER_ID_UNKNOWN;
+		/* Unknown encoder object id in the BIOS tables. */
+		ASSERT(0);
+		break;
+	}
+
+	return id;
+}
+
+/*
+ * Map the object-id field of a BIOS connector object to the driver's
+ * enum connector_id.  Unknown ids map silently (no assert, unlike the
+ * encoder mapping) to CONNECTOR_ID_UNKNOWN.
+ */
+static enum connector_id connector_id_from_bios_object_id(
+	uint32_t bios_object_id)
+{
+	uint32_t bios_connector_id = gpu_id_from_bios_object_id(bios_object_id);
+
+	enum connector_id id;
+
+	switch (bios_connector_id) {
+	case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+		id = CONNECTOR_ID_SINGLE_LINK_DVII;
+		break;
+	case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+		id = CONNECTOR_ID_DUAL_LINK_DVII;
+		break;
+	case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+		id = CONNECTOR_ID_SINGLE_LINK_DVID;
+		break;
+	case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+		id = CONNECTOR_ID_DUAL_LINK_DVID;
+		break;
+	case CONNECTOR_OBJECT_ID_VGA:
+		id = CONNECTOR_ID_VGA;
+		break;
+	case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+		id = CONNECTOR_ID_HDMI_TYPE_A;
+		break;
+	case CONNECTOR_OBJECT_ID_LVDS:
+		id = CONNECTOR_ID_LVDS;
+		break;
+	case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
+		id = CONNECTOR_ID_PCIE;
+		break;
+	case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
+		id = CONNECTOR_ID_HARDCODE_DVI;
+		break;
+	case CONNECTOR_OBJECT_ID_DISPLAYPORT:
+		id = CONNECTOR_ID_DISPLAY_PORT;
+		break;
+	case CONNECTOR_OBJECT_ID_eDP:
+		id = CONNECTOR_ID_EDP;
+		break;
+	case CONNECTOR_OBJECT_ID_MXM:
+		id = CONNECTOR_ID_MXM;
+		break;
+	default:
+		id = CONNECTOR_ID_UNKNOWN;
+		break;
+	}
+
+	return id;
+}
+
+/*
+ * Map the object-id field of a BIOS "generic" object (MXM OPM, GL-sync,
+ * stereo pin) to the driver's enum generic_id; unknown ids map to
+ * GENERIC_ID_UNKNOWN.
+ */
+static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);
+
+	enum generic_id id;
+
+	switch (bios_generic_id) {
+	case GENERIC_OBJECT_ID_MXM_OPM:
+		id = GENERIC_ID_MXM_OPM;
+		break;
+	case GENERIC_OBJECT_ID_GLSYNC:
+		id = GENERIC_ID_GLSYNC;
+		break;
+	case GENERIC_OBJECT_ID_STEREO_PIN:
+		id = GENERIC_ID_STEREO;
+		break;
+	default:
+		id = GENERIC_ID_UNKNOWN;
+		break;
+	}
+
+	return id;
+}
+
+/*
+ * Dispatch to the per-type id decoder for an already-classified object.
+ * Note: OBJECT_TYPE_ROUTER has no case here and falls into the default,
+ * returning 0 like an unknown type.
+ */
+static uint32_t id_from_bios_object_id(enum object_type type,
+	uint32_t bios_object_id)
+{
+	switch (type) {
+	case OBJECT_TYPE_GPU:
+		return gpu_id_from_bios_object_id(bios_object_id);
+	case OBJECT_TYPE_ENCODER:
+		return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
+	case OBJECT_TYPE_CONNECTOR:
+		return (uint32_t)connector_id_from_bios_object_id(
+				bios_object_id);
+	case OBJECT_TYPE_GENERIC:
+		return generic_id_from_bios_object_id(bios_object_id);
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Convert a packed BIOS object id (type | enum-id | object-id bit
+ * fields) into a driver graphics_object_id.  If either the type or the
+ * enum id cannot be decoded, the zero-initialized (invalid) id is
+ * returned instead.
+ */
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id)
+{
+	enum object_type type;
+	enum object_enum_id enum_id;
+	struct graphics_object_id go_id = { 0 };
+
+	type = object_type_from_bios_object_id(bios_object_id);
+
+	if (OBJECT_TYPE_UNKNOWN == type)
+		return go_id;
+
+	enum_id = enum_id_from_bios_object_id(bios_object_id);
+
+	if (ENUM_ID_UNKNOWN == enum_id)
+		return go_id;
+
+	go_id = dal_graphics_object_id_init(
+			id_from_bios_object_id(type, bios_object_id), enum_id, type);
+
+	return go_id;
+}
+
+

+ 33 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __BIOS_PARSER_COMMON_H__
+#define __BIOS_PARSER_COMMON_H__
+
+#include "dm_services.h"
+#include "ObjectID.h"
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id);
+#endif

+ 82 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c

@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "command_table.h"
+#include "bios_parser_types_internal.h"
+
+/*
+ * Return a pointer to 'size' bytes of the BIOS image starting at
+ * 'offset', or NULL when the requested range is not fully inside the
+ * image.  'offset + size' can wrap around in 32 bits for hostile or
+ * corrupt table offsets, so bound-check without performing the
+ * addition; also accept a range ending exactly at bios_size (the old
+ * '<' test rejected reads of the final bytes).
+ */
+uint8_t *bios_get_image(struct dc_bios *bp,
+	uint32_t offset,
+	uint32_t size)
+{
+	if (bp->bios && size <= bp->bios_size &&
+			offset <= bp->bios_size - size)
+		return bp->bios + offset;
+	else
+		return NULL;
+}
+
+#include "reg_helper.h"
+
+#define CTX \
+	bios->ctx
+#define REG(reg)\
+	(bios->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+		ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name
+
+/*
+ * Read the S6_ACC_MODE field of the BIOS_SCRATCH_6 register; a value of
+ * 1 means the driver (not the VBIOS/VGA path) owns the display hardware.
+ */
+bool bios_is_accelerated_mode(
+	struct dc_bios *bios)
+{
+	uint32_t acc_mode;
+	REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode);
+	return (acc_mode == 1);
+}
+
+
+/*
+ * Set S6_ACC_MODE in BIOS_SCRATCH_6 to signal that the driver has taken
+ * ownership of the display hardware (see bios_is_accelerated_mode()).
+ */
+void bios_set_scratch_acc_mode_change(
+	struct dc_bios *bios)
+{
+	REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1);
+}
+
+
+/*
+ * Reflect 'state' into the S6_CRITICAL_STATE field of the BIOS_SCRATCH_6
+ * register (1 = critical section entered, 0 = left).
+ */
+void bios_set_scratch_critical_state(
+	struct dc_bios *bios,
+	bool state)
+{
+	uint32_t critical_state = state ? 1 : 0;
+
+	REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critical_state);
+}
+
+
+

+ 40 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h

@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_HELPER_H__
+#define __DAL_BIOS_PARSER_HELPER_H__
+
+struct bios_parser;
+
+/* Bounds-checked accessors into the raw VBIOS image (bios_parser_helper.c). */
+uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
+	uint32_t size);
+
+bool bios_is_accelerated_mode(struct dc_bios *bios);
+void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
+void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
+
+/* Typed, bounds-checked view into the BIOS image: evaluates to NULL when
+ * [offset, offset + sizeof(type)) is not fully inside the image.  Relies
+ * on a local 'bp' (struct bios_parser *) being in scope at the call site. */
+#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
+
+#endif

+ 56 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c

@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+
+#include "bios_parser_interface.h"
+#include "bios_parser.h"
+
+#include "bios_parser2.h"
+
+
+/*
+ * Create the appropriate VBIOS parser for this ASIC: try the new
+ * atomfirmware-based parser first; if it rejects the image (older
+ * atombios layout), fall back to the legacy parser.  Returns NULL only
+ * when both fail.
+ */
+struct dc_bios *dal_bios_parser_create(
+	struct bp_init_data *init,
+	enum dce_version dce_version)
+{
+	struct dc_bios *bios = NULL;
+
+	bios = firmware_parser_create(init, dce_version);
+
+	/* Fall back to old bios parser for older asics */
+	if (bios == NULL)
+		bios = bios_parser_create(init, dce_version);
+
+	return bios;
+}
+
+/*
+ * Destroy a parser created by dal_bios_parser_create() via its own
+ * vtable destroy hook (each parser flavor frees itself).
+ * NOTE(review): there is no NULL check on dcb or on the pointed-to
+ * handle; callers must pass a valid, non-NULL handle.
+ */
+void dal_bios_parser_destroy(struct dc_bios **dcb)
+{
+	struct dc_bios *bios = *dcb;
+
+	bios->funcs->bios_parser_destroy(dcb);
+}

+ 72 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h

@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+/* Parsed major/minor revision of an ATOM data table header. */
+struct atom_data_revision {
+	uint32_t major;
+	uint32_t minor;
+};
+
+/* Object info table location plus its revision; the union member that is
+ * valid depends on the revision read from the table header. */
+struct object_info_table {
+	struct atom_data_revision revision;
+	union {
+		ATOM_OBJECT_HEADER *v1_1;
+		ATOM_OBJECT_HEADER_V3 *v1_3;
+	};
+};
+
+/* Spread-spectrum table ids as encoded in the VBIOS. */
+enum spread_spectrum_id {
+	SS_ID_UNKNOWN = 0,
+	SS_ID_DP1 = 0xf1,
+	SS_ID_DP2 = 0xf2,
+	SS_ID_LVLINK_2700MHZ = 0xf3,
+	SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+/* Legacy (atombios.h-based) parser instance; 'base' is the public
+ * dc_bios handle handed to the rest of DC. */
+struct bios_parser {
+	struct dc_bios base;
+
+	struct object_info_table object_info_tbl;
+	uint32_t object_info_tbl_offset;
+	ATOM_MASTER_DATA_TABLE *master_data_tbl;
+
+	const struct bios_parser_helper *bios_helper;
+
+	const struct command_table_helper *cmd_helper;
+	struct cmd_tbl cmd_tbl;
+
+	bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+	container_of(dc_bios, struct bios_parser, base)
+
+#endif

+ 74 - 0
drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h

@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+/* use atomfirmware_bringup.h only. Not atombios.h anymore */
+
+/* Major/minor revision of an ATOM data table, as read from the VBIOS. */
+struct atom_data_revision {
+	uint32_t major;
+	uint32_t minor;
+};
+
+/* Pointer to the VBIOS object info table (atomfirmware v1.4 layout only). */
+struct object_info_table {
+	struct atom_data_revision revision;
+	union {
+		struct display_object_info_table_v1_4 *v1_4;
+	};
+};
+
+/* Spread-spectrum table entry identifiers used by this parser. */
+enum spread_spectrum_id {
+	SS_ID_UNKNOWN = 0,
+	SS_ID_DP1 = 0xf1,
+	SS_ID_DP2 = 0xf2,
+	SS_ID_LVLINK_2700MHZ = 0xf3,
+	SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+/* Internal state of the atomfirmware-based BIOS parser (bios_parser2);
+ * mirrors the legacy header but uses atomfirmware table types.
+ */
+struct bios_parser {
+	struct dc_bios base;
+
+	struct object_info_table object_info_tbl;
+	uint32_t object_info_tbl_offset;
+	struct atom_master_data_table_v2_1 *master_data_tbl;
+
+
+	const struct bios_parser_helper *bios_helper;
+
+	const struct command_table_helper *cmd_helper;
+	struct cmd_tbl cmd_tbl;
+
+	bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+	container_of(dc_bios, struct bios_parser, base)
+
+#endif

+ 2424 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table.c

@@ -0,0 +1,2424 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table.h"
+#include "command_table_helper.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal.h"
+
+/* Execute a VBIOS command table through the CGS layer; evaluates to true
+ * on success (cgs returns 0).
+ */
+#define EXEC_BIOS_CMD_TABLE(command, params)\
+	(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+		GetIndexIntoMasterTable(COMMAND, command), \
+		&params) == 0)
+
+/* Query frev/crev of a command table; returns the CGS error code (0 = ok). */
+#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
+	cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+		GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
+
+/* Parameter (content) revision of a command table, or 0 on query failure. */
+#define BIOS_CMD_TABLE_PARA_REVISION(command)\
+	bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+		GetIndexIntoMasterTable(COMMAND, command))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
+static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_output_control(struct bios_parser *bp);
+static void init_set_crtc_timing(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+static void init_enable_crtc_mem_req(struct bios_parser *bp);
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_program_clock(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+
+/* Public entry point: populate bp->cmd_tbl with the handler matching the
+ * parameter revision each VBIOS command table reports. Each init_* helper
+ * fills its own function pointer(s), so the call order is not significant.
+ */
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+	init_dig_encoder_control(bp);
+	init_transmitter_control(bp);
+	init_set_pixel_clock(bp);
+	init_enable_spread_spectrum_on_ppll(bp);
+	init_adjust_display_pll(bp);
+	init_dac_encoder_control(bp);
+	init_dac_output_control(bp);
+	init_set_crtc_timing(bp);
+	init_select_crtc_source(bp);
+	init_enable_crtc(bp);
+	init_enable_crtc_mem_req(bp);
+	init_program_clock(bp);
+	init_external_encoder_control(bp);
+	init_enable_disp_power_gating(bp);
+	init_set_dce_clock(bp);
+}
+
+/* Return the content (parameter) revision of the command table at @index,
+ * or 0 when the CGS revision query fails.
+ */
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+					     uint32_t index)
+{
+	uint8_t frev;
+	uint8_t crev;
+
+	if (cgs_atom_get_cmd_table_revs(cgs_device, index, &frev, &crev) == 0)
+		return crev;
+
+	return 0;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  D I G E N C O D E R C O N T R O L
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result encoder_control_digx_v3(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v4(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v5(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp);
+
+/* Pick the DIGxEncoderControl handler by table parameter revision.
+ * Note the mapping: revision 2 is serviced by the *_v3 implementation
+ * (the function names track the ATOM parameter struct versions, not the
+ * table revision). Any other revision falls back to the legacy per-DIG
+ * v1 tables.
+ */
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+	uint32_t version =
+		BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl);
+
+	switch (version) {
+	case 2:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3;
+		break;
+	case 4:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4;
+		break;
+
+	case 5:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5;
+		break;
+
+	default:
+		init_encoder_control_dig_v1(bp);
+		break;
+	}
+}
+
+static enum bp_result encoder_control_dig_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig1_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig2_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+/* Legacy fallback: wire up the per-DIG v1 encoder-control tables.
+ * A per-DIG pointer is installed only if that table reports parameter
+ * revision 1; otherwise it stays NULL and the dispatcher skips it.
+ */
+static void init_encoder_control_dig_v1(struct bios_parser *bp)
+{
+	struct cmd_tbl *tbl = &bp->cmd_tbl;
+
+	tbl->encoder_control_dig1 =
+		(BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl) == 1) ?
+			encoder_control_dig1_v1 : NULL;
+
+	tbl->encoder_control_dig2 =
+		(BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl) == 1) ?
+			encoder_control_dig2_v1 : NULL;
+
+	tbl->dig_encoder_control = encoder_control_dig_v1;
+}
+
+/* v1 dispatcher: route the request to the per-DIG table for the engine
+ * named in @cntl. Fails when cntl is NULL, the engine is unknown, or the
+ * matching per-DIG handler was not installed.
+ */
+static enum bp_result encoder_control_dig_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	struct cmd_tbl *tbl = &bp->cmd_tbl;
+
+	if (cntl == NULL)
+		return BP_RESULT_FAILURE;
+
+	switch (cntl->engine_id) {
+	case ENGINE_ID_DIGA:
+		if (tbl->encoder_control_dig1 != NULL)
+			return tbl->encoder_control_dig1(bp, cntl);
+		break;
+	case ENGINE_ID_DIGB:
+		if (tbl->encoder_control_dig2 != NULL)
+			return tbl->encoder_control_dig2(bp, cntl);
+		break;
+	default:
+		break;
+	}
+
+	return BP_RESULT_FAILURE;
+}
+
+/* Program DIG1 via the v1 DIG1EncoderControl table; the command helper
+ * translates @cntl into the ATOM V2 parameter layout.
+ */
+static enum bp_result encoder_control_dig1_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 params;
+
+	memset(&params, 0, sizeof(params));
+
+	bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+	return EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params) ?
+			BP_RESULT_OK : BP_RESULT_FAILURE;
+}
+
+/* Program DIG2 via the v1 DIG2EncoderControl table; the command helper
+ * translates @cntl into the ATOM V2 parameter layout.
+ */
+static enum bp_result encoder_control_dig2_v1(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 params;
+
+	memset(&params, 0, sizeof(params));
+
+	bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+	return EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params) ?
+			BP_RESULT_OK : BP_RESULT_FAILURE;
+}
+
+/* Program a DIG encoder through DIGxEncoderControl using the ATOM V3
+ * parameter layout (installed for table parameter revision 2).
+ */
+static enum bp_result encoder_control_digx_v3(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 params;
+
+	memset(&params, 0, sizeof(params));
+
+	/* dual link 2.7GHz when more than four lanes, else single link 1.62GHz */
+	params.acConfig.ucDPLinkRate =
+			(cntl->lanes_number > LANE_COUNT_FOUR) ? 1 : 0;
+
+	params.acConfig.ucDigSel = (uint8_t)cntl->engine_id;
+
+	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+	/* We need to convert from KHz units into 10KHz units */
+	params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+	params.ucEncoderMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio);
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+	return EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params) ?
+			BP_RESULT_OK : BP_RESULT_FAILURE;
+}
+
+/* Program a DIG encoder through DIGxEncoderControl using the ATOM V4
+ * parameter layout (installed for table parameter revision 4).
+ */
+static enum bp_result encoder_control_digx_v4(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 params;
+
+	memset(&params, 0, sizeof(params));
+
+	/* dual link 2.7GHz when more than four lanes, else single link 1.62GHz */
+	params.acConfig.ucDPLinkRate =
+			(cntl->lanes_number > LANE_COUNT_FOUR) ? 1 : 0;
+
+	params.acConfig.ucDigSel = (uint8_t)cntl->engine_id;
+
+	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+	/* We need to convert from KHz units into 10KHz units */
+	params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+	params.ucEncoderMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio);
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+	return EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params) ?
+			BP_RESULT_OK : BP_RESULT_FAILURE;
+}
+
+/* Program a DIG encoder through DIGxEncoderControl using the V5 stream
+ * setup parameters. For HDMI deep color the TMDS clock is scaled up by
+ * the bits-per-color ratio (30/24, 36/24, 48/24).
+ */
+static enum bp_result encoder_control_digx_v5(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0};
+
+	params.ucDigId = (uint8_t)(cntl->engine_id);
+	params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+	/* KHz -> 10KHz units.
+	 * NOTE(review): unlike the v3/v4 paths this 32-bit field is written
+	 * without a cpu_to_le32() conversion — presumably relying on a
+	 * little-endian host; confirm against the other table writers.
+	 */
+	params.ulPixelClock = cntl->pixel_clock / 10;
+	params.ucDigMode =
+			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio));
+	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+	/* Translate bp color depth into the ATOM panel bpc encoding. */
+	switch (cntl->color_depth) {
+	case COLOR_DEPTH_888:
+		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_101010:
+		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_121212:
+		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_161616:
+		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+		break;
+	default:
+		break;
+	}
+
+	/* HDMI deep color: scale the clock by bits-per-pixel over 24. */
+	if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		switch (cntl->color_depth) {
+		case COLOR_DEPTH_101010:
+			params.ulPixelClock =
+				(params.ulPixelClock * 30) / 24;
+			break;
+		case COLOR_DEPTH_121212:
+			params.ulPixelClock =
+				(params.ulPixelClock * 36) / 24;
+			break;
+		case COLOR_DEPTH_161616:
+			params.ulPixelClock =
+				(params.ulPixelClock * 48) / 24;
+			break;
+		default:
+			break;
+		}
+
+	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  TRANSMITTER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result transmitter_control_v2(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v3(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v4(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_5(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_6(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+
+/* Pick the UNIPHYTransmitterControl handler by table content revision.
+ * frev/crev are zero-initialized: BREAK_TO_DEBUGGER() does not abort, so
+ * if the revision query fails execution continues into the switch, which
+ * would otherwise read an uninitialized crev (undefined behavior). With
+ * the zero init a failed query deterministically lands in the NULL default.
+ */
+static void init_transmitter_control(struct bios_parser *bp)
+{
+	uint8_t frev = 0;
+	uint8_t crev = 0;
+
+	if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
+			frev, crev) != 0)
+		BREAK_TO_DEBUGGER();
+	switch (crev) {
+	case 2:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v2;
+		break;
+	case 3:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v3;
+		break;
+	case 4:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v4;
+		break;
+	case 5:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
+		break;
+	case 6:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+		break;
+	default:
+		bp->cmd_tbl.transmitter_control = NULL;
+		break;
+	}
+}
+
+/* Program a UNIPHY transmitter via UNIPHYTransmitterControl (ATOM V2
+ * parameters). The parameter encoding depends on the action: INIT passes
+ * the connector id, voltage-swing passes lane select/settings, and
+ * enable/disable passes the (half-rate for dual link) pixel clock.
+ */
+static enum bp_result transmitter_control_v2(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+
+	memset(&params, 0, sizeof(params));
+
+	/* Only UNIPHY A-F and the Travis LCD bridge are programmable here. */
+	switch (cntl->transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+	case TRANSMITTER_TRAVIS_LCD:
+		break;
+	default:
+		return BP_RESULT_BADINPUT;
+	}
+
+	switch (cntl->action) {
+	case TRANSMITTER_CONTROL_INIT:
+		if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
+				(CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
+			/* on INIT this bit should be set according to the
+			 * physical connector
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* connector object id (fits in one byte, hence the cast) */
+		params.usInitInfo =
+				cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
+		break;
+	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+		params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+		params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+		break;
+	default:
+		/* if dual-link */
+		if (LANE_COUNT_FOUR < cntl->lanes_number) {
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 20KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+		} else
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 10KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		break;
+	}
+
+	/* 00 - coherent mode
+	 * 01 - incoherent mode
+	 */
+
+	params.acConfig.fCoherentMode = cntl->coherent;
+
+	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+			|| (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+			|| (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+		/* Bit2: Transmitter Link selection
+		 * =0 when bit0=0, single link A/C/E, when bit0=1,
+		 * master link A/C/E
+		 * =1 when bit0=0, single link B/D/F, when bit0=1,
+		 * master link B/D/F
+		 */
+		params.acConfig.ucLinkSel = 1;
+
+	if (ENGINE_ID_DIGB == cntl->engine_id)
+		/* Bit3: Transmitter data source selection
+		 * =0 DIGA is data source.
+		 * =1 DIGB is data source.
+		 * This bit is only useful when ucAction= ATOM_ENABLE
+		 */
+		params.acConfig.ucEncoderSel = 1;
+
+	if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+		/* Bit4: DP connector flag
+		 * =0 connector is a non-DP connector
+		 * =1 connector is DP connector
+		 */
+		params.acConfig.fDPConnector = 1;
+
+	/* Bit[7:6]: Transmitter selection
+	 * =0 UNIPHY_ENCODER: UNIPHYA/B
+	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
+	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
+	 * =3 reserved
+	 */
+	params.acConfig.ucTransmitterSel =
+			(uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
+					cntl->transmitter);
+
+	params.ucAction = (uint8_t)cntl->action;
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* Program a UNIPHY transmitter via UNIPHYTransmitterControl (ATOM V3
+ * parameters). Adds over v2: a reference-clock (PLL) source field and the
+ * multi_path-aware dual-link flag on enable/disable.
+ */
+static enum bp_result transmitter_control_v3(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
+	uint32_t pll_id;
+	enum connector_id conn_id =
+			dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
+					|| (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);
+
+	memset(&params, 0, sizeof(params));
+
+	/* Only UNIPHY A-F and the Travis LCD bridge are programmable here. */
+	switch (cntl->transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+	case TRANSMITTER_TRAVIS_LCD:
+		break;
+	default:
+		return BP_RESULT_BADINPUT;
+	}
+
+	if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
+		return BP_RESULT_BADINPUT;
+
+	/* fill information based on the action */
+	switch (cntl->action) {
+	case TRANSMITTER_CONTROL_INIT:
+		if (dual_link_conn) {
+			/* on INIT this bit should be set according to the
+			 * physical connector
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+		}
+
+		/* connector object id (fits in one byte, hence the cast) */
+		params.usInitInfo =
+				cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+		break;
+	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+		params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+		params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+		break;
+	default:
+		if (dual_link_conn && cntl->multi_path)
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* if dual-link */
+		if (LANE_COUNT_FOUR < cntl->lanes_number) {
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 20KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+		} else {
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 10KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		}
+		break;
+	}
+
+	/* 00 - coherent mode
+	 * 01 - incoherent mode
+	 */
+
+	params.acConfig.fCoherentMode = cntl->coherent;
+
+	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+		/* Bit2: Transmitter Link selection
+		 * =0 when bit0=0, single link A/C/E, when bit0=1,
+		 * master link A/C/E
+		 * =1 when bit0=0, single link B/D/F, when bit0=1,
+		 * master link B/D/F
+		 */
+		params.acConfig.ucLinkSel = 1;
+
+	if (ENGINE_ID_DIGB == cntl->engine_id)
+		/* Bit3: Transmitter data source selection
+		 * =0 DIGA is data source.
+		 * =1 DIGB is data source.
+		 * This bit is only useful when ucAction= ATOM_ENABLE
+		 */
+		params.acConfig.ucEncoderSel = 1;
+
+	/* Bit[7:6]: Transmitter selection
+	 * =0 UNIPHY_ENCODER: UNIPHYA/B
+	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
+	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
+	 * =3 reserved
+	 */
+	params.acConfig.ucTransmitterSel =
+			(uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);
+
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+	params.acConfig.ucRefClkSource = (uint8_t)pll_id;
+
+	params.ucAction = (uint8_t)cntl->action;
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* Program a UNIPHY transmitter via UNIPHYTransmitterControl (ATOM V4
+ * parameters). Same structure as v3 but the PLL is mapped through the
+ * helper's ref-clk-source translation instead of the atom clock-source id.
+ */
+static enum bp_result transmitter_control_v4(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
+	uint32_t ref_clk_src_id;
+	enum connector_id conn_id =
+			dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+	const struct command_table_helper *cmd = bp->cmd_helper;
+
+	memset(&params, 0, sizeof(params));
+
+	/* Only UNIPHY A-F and the Travis LCD bridge are programmable here. */
+	switch (cntl->transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+	case TRANSMITTER_UNIPHY_B:
+	case TRANSMITTER_UNIPHY_C:
+	case TRANSMITTER_UNIPHY_D:
+	case TRANSMITTER_UNIPHY_E:
+	case TRANSMITTER_UNIPHY_F:
+	case TRANSMITTER_TRAVIS_LCD:
+		break;
+	default:
+		return BP_RESULT_BADINPUT;
+	}
+
+	if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
+		return BP_RESULT_BADINPUT;
+
+	switch (cntl->action) {
+	case TRANSMITTER_CONTROL_INIT:
+	{
+		if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+				(CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+			/* on INIT this bit should be set according to the
+			 * physical connector
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* connector object id (fits in one byte, hence the cast) */
+		params.usInitInfo =
+				cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+	}
+	break;
+	case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+		params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
+		params.asMode.ucLaneSet = (uint8_t)(cntl->lane_settings);
+		break;
+	default:
+		if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+				(CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+			/* on ENABLE/DISABLE this bit should be set according to
+			 * actual timing (number of lanes)
+			 * Bit0: dual link connector flag
+			 * =0 connector is single link connector
+			 * =1 connector is dual link connector
+			 */
+			params.acConfig.fDualLinkConnector = 1;
+
+		/* if dual-link */
+		if (LANE_COUNT_FOUR < cntl->lanes_number)
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 20KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+		else {
+			/* link rate, half for dual link
+			 * We need to convert from KHz units into 10KHz units
+			 */
+			params.usPixelClock =
+					cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		}
+		break;
+	}
+
+	/* 00 - coherent mode
+	 * 01 - incoherent mode
+	 */
+
+	params.acConfig.fCoherentMode = cntl->coherent;
+
+	if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+		|| (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+		/* Bit2: Transmitter Link selection
+		 * =0 when bit0=0, single link A/C/E, when bit0=1,
+		 * master link A/C/E
+		 * =1 when bit0=0, single link B/D/F, when bit0=1,
+		 * master link B/D/F
+		 */
+		params.acConfig.ucLinkSel = 1;
+
+	if (ENGINE_ID_DIGB == cntl->engine_id)
+		/* Bit3: Transmitter data source selection
+		 * =0 DIGA is data source.
+		 * =1 DIGB is data source.
+		 * This bit is only useful when ucAction= ATOM_ENABLE
+		 */
+		params.acConfig.ucEncoderSel = 1;
+
+	/* Bit[7:6]: Transmitter selection
+	 * =0 UNIPHY_ENCODER: UNIPHYA/B
+	 * =1 UNIPHY1_ENCODER: UNIPHYC/D
+	 * =2 UNIPHY2_ENCODER: UNIPHYE/F
+	 * =3 reserved
+	 */
+	params.acConfig.ucTransmitterSel =
+		(uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));
+	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+	params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);
+	params.ucAction = (uint8_t)(cntl->action);
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* Program a UNIPHY transmitter via UNIPHYTransmitterControl (ATOM V1.5
+ * parameters). This layout is flat: phy id, dig mode, HPD selection and
+ * the symbol clock (10KHz units) are all passed in one structure.
+ */
+static enum bp_result transmitter_control_v1_5(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;
+
+	memset(&params, 0, sizeof(params));
+	params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+	params.ucAction = (uint8_t)cntl->action;
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+	params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+
+	params.ucDigMode =
+		cmd->signal_type_to_atom_dig_mode(cntl->signal);
+	params.asConfig.ucPhyClkSrcId =
+		cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
+	/* 00 - coherent mode */
+	params.asConfig.ucCoherentMode = cntl->coherent;
+	params.asConfig.ucHPDSel =
+		cmd->hpd_sel_to_atom(cntl->hpd_sel);
+	params.ucDigEncoderSel =
+		cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+	params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
+	/* KHz -> 10KHz units */
+	params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
+	/*
+	 * In SI/TN case, caller have to set usPixelClock as following:
+	 * DP mode: usPixelClock = DP_LINK_CLOCK/10
+	 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+	 * DVI single link mode: usPixelClock = pixel clock
+	 * DVI dual link mode: usPixelClock = pixel clock
+	 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+	 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+	 * LVDS mode: usPixelClock = pixel clock
+	 */
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* Program a UNIPHY transmitter via UNIPHYTransmitterControl (ATOM V1.6
+ * parameters). ulSymClock is a 32-bit little-endian field, so it must be
+ * converted with the 32-bit byteorder helpers: the previous code stored it
+ * unconverted and then round-tripped it through cpu_to_le16/le16_to_cpu,
+ * which truncates to 16 bits and mis-swaps on big-endian hosts. On
+ * little-endian hosts the le32 conversions are no-ops, so behavior there
+ * is unchanged.
+ */
+static enum bp_result transmitter_control_v1_6(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params;
+
+	memset(&params, 0, sizeof(params));
+	params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+	params.ucAction = (uint8_t)cntl->action;
+
+	/* Voltage-swing programming reuses the lane-set byte; every other
+	 * action carries the dig (signal) mode instead.
+	 */
+	if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+		params.ucDPLaneSet = (uint8_t)cntl->lane_settings;
+	else
+		params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+	params.ucLaneNum = (uint8_t)cntl->lanes_number;
+	params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+	params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+	params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+	/* KHz -> 10KHz units, stored little-endian */
+	params.ulSymClock = cpu_to_le32(cntl->pixel_clock/10);
+
+	/*
+	 * In SI/TN case, caller have to set usPixelClock as following:
+	 * DP mode: usPixelClock = DP_LINK_CLOCK/10
+	 * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+	 * DVI single link mode: usPixelClock = pixel clock
+	 * DVI dual link mode: usPixelClock = pixel clock
+	 * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+	 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+	 * LVDS mode: usPixelClock = pixel clock
+	 */
+	switch (cntl->signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* HDMI deep color: scale the clock by bits-per-pixel over 24 */
+		switch (cntl->color_depth) {
+		case COLOR_DEPTH_101010:
+			params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 30) / 24);
+			break;
+		case COLOR_DEPTH_121212:
+			params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 36) / 24);
+			break;
+		case COLOR_DEPTH_161616:
+			params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 48) / 24);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+		result = BP_RESULT_OK;
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET PIXEL CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_pixel_clock_v3(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v7(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
+/* Pick the SetPixelClock handler by table parameter revision. Revisions
+ * other than 3/5/6/7 (including 4, which has no implementation here)
+ * leave the pointer NULL.
+ */
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+	case 3:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3;
+		break;
+	case 5:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5;
+		break;
+	case 6:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6;
+		break;
+	case 7:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+		break;
+	default:
+		bp->cmd_tbl.set_pixel_clock = NULL;
+		break;
+	}
+}
+
+/* Program the pixel PLL via SetPixelClock using the ATOM V3 parameters.
+ * Only PPLL1/PPLL2 are addressable in this layout. Note the aliasing
+ * trick below: the V3 parameter struct overlays the allocation's
+ * sPCLKInput block, so the later writes through `params` land in the
+ * same memory that the first section filled.
+ */
+static enum bp_result set_pixel_clock_v3(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	PIXEL_CLOCK_PARAMETERS_V3 *params;
+	SET_PIXEL_CLOCK_PS_ALLOCATION allocation;
+
+	memset(&allocation, 0, sizeof(allocation));
+
+	if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id)
+		allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
+	else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
+		allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
+	else
+		return BP_RESULT_BADINPUT;
+
+	allocation.sPCLKInput.usRefDiv =
+			cpu_to_le16((uint16_t)bp_params->reference_divider);
+	allocation.sPCLKInput.usFbDiv =
+			cpu_to_le16((uint16_t)bp_params->feedback_divider);
+	allocation.sPCLKInput.ucFracFbDiv =
+			(uint8_t)bp_params->fractional_feedback_divider;
+	allocation.sPCLKInput.ucPostDiv =
+			(uint8_t)bp_params->pixel_clock_post_divider;
+
+	/* We need to convert from KHz units into 10KHz units */
+	allocation.sPCLKInput.usPixelClock =
+			cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+	/* reinterpret the input block through the V3 parameter layout */
+	params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
+	params->ucTransmitterId =
+			bp->cmd_helper->encoder_id_to_atom(
+					dal_graphics_object_id_get_encoder_id(
+							bp_params->encoder_object_id));
+	params->ucEncoderMode =
+			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+					bp_params->signal_type, false));
+
+	if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+		params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+	if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
+		params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;
+
+	/* any CRTC other than D1 selects the CRTC2 bit in this layout */
+	if (CONTROLLER_ID_D1 != bp_params->controller_id)
+		params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+
+	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* Local fallback definitions: the VBIOS headers define the bare
+ * PIXEL_CLOCK_PARAMETERS_V5/V6 input structs but not the full
+ * parameter-space allocations, so they are declared here. */
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
+	PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
+	/* Caller doesn't need to init this portion */
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
+#endif
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
+	PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
+	/* Caller doesn't need to init this portion */
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
+#endif
+
+/* SetPixelClock table revision 5: like v3 but the PLL/controller ids are
+ * translated through the command helper, and the fractional feedback
+ * divider is a full 32-bit value.  Returns BP_RESULT_OK only when both id
+ * translations succeed and the table executes.
+ */
+static enum bp_result set_pixel_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(
+					bp_params->controller_id, &controller_id)) {
+		clk.sPCLKInput.ucCRTC = controller_id;
+		clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
+		clk.sPCLKInput.ucRefDiv =
+				(uint8_t)(bp_params->reference_divider);
+		clk.sPCLKInput.usFbDiv =
+				cpu_to_le16((uint16_t)(bp_params->feedback_divider));
+		clk.sPCLKInput.ulFbDivDecFrac =
+				cpu_to_le32(bp_params->fractional_feedback_divider);
+		clk.sPCLKInput.ucPostDiv =
+				(uint8_t)(bp_params->pixel_clock_post_divider);
+		clk.sPCLKInput.ucTransmitterID =
+				bp->cmd_helper->encoder_id_to_atom(
+						dal_graphics_object_id_get_encoder_id(
+								bp_params->encoder_object_id));
+		clk.sPCLKInput.ucEncoderMode =
+				(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+						bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 10KHz units */
+		clk.sPCLKInput.usPixelClock =
+				cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+		/* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
+		 * =1:30bpp, =2:32bpp
+		 * driver choose program it itself, i.e. here we program it
+		 * to 888 by default.
+		 */
+
+		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+			result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+/* SetPixelClock table revision 6: same flow as v5, but the pixel clock and
+ * CRTC id are packed into the ulCrtcPclkFreq bit-field union (24-bit clock
+ * in 10KHz units + 8-bit CRTC id) instead of separate members.
+ */
+static enum bp_result set_pixel_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(
+					bp_params->controller_id, &controller_id)) {
+		/* Note: VBIOS still wants to use ucCRTC name which is now
+		 * 1 byte in ULONG
+		 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+		 *{
+		 * target the pixel clock to drive the CRTC timing.
+		 * ULONG ulPixelClock:24;
+		 * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+		 * previous version.
+		 * ATOM_CRTC1~6, indicate the CRTC controller to
+		 * ULONG ucCRTC:8;
+		 * drive the pixel clock. not used for DCPLL case.
+		 *}CRTC_PIXEL_CLOCK_FREQ;
+		 *union
+		 *{
+		 * pixel clock and CRTC id frequency
+		 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+		 * ULONG ulDispEngClkFreq; dispclk frequency
+		 *};
+		 */
+		clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
+		clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
+		clk.sPCLKInput.ucRefDiv =
+				(uint8_t) bp_params->reference_divider;
+		clk.sPCLKInput.usFbDiv =
+				cpu_to_le16((uint16_t) bp_params->feedback_divider);
+		clk.sPCLKInput.ulFbDivDecFrac =
+				cpu_to_le32(bp_params->fractional_feedback_divider);
+		clk.sPCLKInput.ucPostDiv =
+				(uint8_t) bp_params->pixel_clock_post_divider;
+		clk.sPCLKInput.ucTransmitterID =
+				bp->cmd_helper->encoder_id_to_atom(
+						dal_graphics_object_id_get_encoder_id(
+								bp_params->encoder_object_id));
+		clk.sPCLKInput.ucEncoderMode =
+				(uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
+						bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 10KHz units */
+		clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
+				cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
+		}
+
+		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
+			clk.sPCLKInput.ucMiscInfo |=
+					PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+		}
+
+		/* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
+		 * 24bpp =1:30bpp, =2:32bpp
+		 * driver choose program it itself, i.e. here we pass required
+		 * target rate that includes deep color.
+		 */
+
+		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+			result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+/* SetPixelClock table revision 7: the VBIOS computes the dividers itself,
+ * so only the target clock, encoder routing, deep-color ratio and misc
+ * flags are passed in.
+ */
+static enum bp_result set_pixel_clock_v7(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	PIXEL_CLOCK_PARAMETERS_V7 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
+		/* Note: VBIOS still wants to use ucCRTC name which is now
+		 * 1 byte in ULONG
+		 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+		 *{
+		 * target the pixel clock to drive the CRTC timing.
+		 * ULONG ulPixelClock:24;
+		 * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+		 * previous version.
+		 * ATOM_CRTC1~6, indicate the CRTC controller to
+		 * ULONG ucCRTC:8;
+		 * drive the pixel clock. not used for DCPLL case.
+		 *}CRTC_PIXEL_CLOCK_FREQ;
+		 *union
+		 *{
+		 * pixel clock and CRTC id frequency
+		 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+		 * ULONG ulDispEngClkFreq; dispclk frequency
+		 *};
+		 */
+		clk.ucCRTC = controller_id;
+		clk.ucPpll = (uint8_t) pll_id;
+		clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
+		clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
+
+		/* NOTE(review): unlike v3/v5/v6 this *multiplies* by 10,
+		 * i.e. KHz -> 100Hz units, which appears to be the unit the
+		 * V7 parameter block takes; the previous "convert from KHz
+		 * units into 10KHz units" wording contradicted the code.
+		 */
+		clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
+
+		clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+		if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
+
+		if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+		if (bp_params->flags.SUPPORT_YUV_420)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+		if (bp_params->flags.SET_XTALIN_REF_SRC)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+		if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+		if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+			clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+			result = BP_RESULT_OK;
+	}
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE PIXEL CLOCK SS
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable);
+
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
+{
+	/* Select the spread-spectrum handler for the reported table
+	 * revision; unknown revisions leave the entry NULL. */
+	uint32_t rev =
+			BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL);
+
+	if (rev == 1)
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+				enable_spread_spectrum_on_ppll_v1;
+	else if (rev == 2)
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+				enable_spread_spectrum_on_ppll_v2;
+	else if (rev == 3)
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+				enable_spread_spectrum_on_ppll_v3;
+	else
+		bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
+}
+
+/* EnableSpreadSpectrumOnPPLL revision 1: enable/disable SS on PPLL1 or
+ * PPLL2.  SS is only actually enabled when both 'enable' is set and the
+ * requested percentage is non-zero.
+ */
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL params;
+
+	memset(&params, 0, sizeof(params));
+
+	if ((enable == true) && (bp_params->percentage > 0))
+		params.ucEnable = ATOM_ENABLE;
+	else
+		params.ucEnable = ATOM_DISABLE;
+
+	params.usSpreadSpectrumPercentage =
+			cpu_to_le16((uint16_t)bp_params->percentage);
+	params.ucSpreadSpectrumStep =
+			(uint8_t)bp_params->ver1.step;
+	params.ucSpreadSpectrumDelay =
+			(uint8_t)bp_params->ver1.delay;
+	/* convert back to unit of 10KHz */
+	params.ucSpreadSpectrumRange =
+			(uint8_t)(bp_params->ver1.range / 10000);
+
+	if (bp_params->flags.EXTERNAL_SS)
+		params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK;
+
+	if (bp_params->flags.CENTER_SPREAD)
+		params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE;
+
+	if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+		params.ucPpll = ATOM_PPLL1;
+	else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+		params.ucPpll = ATOM_PPLL2;
+	else
+		BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+	/* NOTE(review): on an unexpected PLL id execution falls through and
+	 * the table is still executed with ucPpll left at 0 from the memset;
+	 * consider returning BP_RESULT_BADINPUT here instead. */
+
+	if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* EnableSpreadSpectrumOnPPLL revision 2: SS amount is split into a
+ * feedback-divider part and a fractional (NFRAC) part packed into one
+ * 16-bit field; step size comes from ds.ds_frac_size.
+ */
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params;
+
+	memset(&params, 0, sizeof(params));
+
+	if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL;
+	else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL;
+	else
+		BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+	/* NOTE(review): as in v1, an unexpected PLL id still falls through
+	 * and executes the table with a zero type field. */
+
+	if ((enable == true) && (bp_params->percentage > 0)) {
+		params.ucEnable = ATOM_ENABLE;
+
+		params.usSpreadSpectrumPercentage =
+				cpu_to_le16((uint16_t)(bp_params->percentage));
+		params.usSpreadSpectrumStep =
+				cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+		if (bp_params->flags.EXTERNAL_SS)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD;
+
+		if (bp_params->flags.CENTER_SPREAD)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD;
+
+		/* Both amounts need to be left shifted first before bit
+		 * comparison. Otherwise, the result will always be zero here
+		 */
+		params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+				((bp_params->ds.feedback_amount <<
+						ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+						ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+						((bp_params->ds.nfrac_amount <<
+								ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+								ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)));
+	} else
+		params.ucEnable = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* EnableSpreadSpectrumOnPPLL revision 3: supports PLL0/PLL1/PLL2/DCPLL and
+ * adds a fractional SS-amount field.  Unlike v1/v2, an unexpected PLL id
+ * returns BP_RESULT_FAILURE without touching the table.
+ */
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+	struct bios_parser *bp,
+	struct bp_spread_spectrum_parameters *bp_params,
+	bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params;
+
+	memset(&params, 0, sizeof(params));
+
+	switch (bp_params->pll_id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		/* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only,
+		 * not for SI display clock.
+		 */
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL;
+		break;
+
+	case CLOCK_SOURCE_ID_PLL2:
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL;
+		break;
+
+	case CLOCK_SOURCE_ID_DCPLL:
+		params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+		break;
+
+	default:
+		BREAK_TO_DEBUGGER();
+		/* Unexpected PLL value!! */
+		return result;
+	}
+
+	if (enable == true) {
+		params.ucEnable = ATOM_ENABLE;
+
+		params.usSpreadSpectrumAmountFrac =
+				cpu_to_le16((uint16_t)(bp_params->ds_frac_amount));
+		params.usSpreadSpectrumStep =
+				cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+		if (bp_params->flags.EXTERNAL_SS)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD;
+		if (bp_params->flags.CENTER_SPREAD)
+			params.ucSpreadSpectrumType |=
+					ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD;
+
+		/* Both amounts need to be left shifted first before bit
+		 * comparison. Otherwise, the result will always be zero here
+		 */
+		params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+				((bp_params->ds.feedback_amount <<
+						ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) &
+						ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) |
+						((bp_params->ds.nfrac_amount <<
+								ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) &
+								ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)));
+	} else
+		params.ucEnable = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ADJUST DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result adjust_display_pll_v2(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params);
+static enum bp_result adjust_display_pll_v3(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params);
+
+static void init_adjust_display_pll(struct bios_parser *bp)
+{
+	/* Bind the AdjustDisplayPll handler for the reported revision. */
+	uint32_t rev = BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll);
+
+	if (rev == 2)
+		bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2;
+	else if (rev == 3)
+		bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
+	else
+		bp->cmd_tbl.adjust_display_pll = NULL;
+}
+
+/* AdjustDisplayPll table revision 2.
+ *
+ * Asks the VBIOS to adjust the requested pixel clock for the given
+ * encoder/signal combination.  On success the adjusted clock (converted
+ * back from 10KHz units to KHz) is written to
+ * bp_params->adjusted_pixel_clock.
+ */
+static enum bp_result adjust_display_pll_v2(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 };
+
+	/* We need to convert from KHz units into 10KHz units and then convert
+	 * output pixel clock back 10KHz-->KHz */
+	uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10;
+
+	params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in));
+	params.ucTransmitterID =
+			bp->cmd_helper->encoder_id_to_atom(
+					dal_graphics_object_id_get_encoder_id(
+							bp_params->encoder_object_id));
+	params.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					bp_params->signal_type, false);
+
+	/* Bug fix: the command table was never executed, so this function
+	 * always returned BP_RESULT_FAILURE and the adjusted clock was
+	 * never written back.  Execute it and scale the in/out
+	 * usPixelClock result, mirroring adjust_display_pll_v3. */
+	if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+		/* Convert output pixel clock back 10KHz-->KHz: multiply
+		 * original pixel clock in KHz by ratio
+		 * [output pxlClk/input pxlClk] */
+		uint64_t pixel_clk_10_khz_out =
+				(uint64_t)le16_to_cpu(params.usPixelClock);
+		uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+		if (pixel_clock_10KHz_in != 0) {
+			bp_params->adjusted_pixel_clock =
+					div_u64(pixel_clk * pixel_clk_10_khz_out,
+							pixel_clock_10KHz_in);
+		} else {
+			bp_params->adjusted_pixel_clock = 0;
+			BREAK_TO_DEBUGGER();
+		}
+
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+/* AdjustDisplayPll table revision 3: like v2 but with separate input and
+ * output sub-structures.  On success, writes the adjusted clock (scaled
+ * back to KHz), reference divider and post divider into bp_params.
+ */
+static enum bp_result adjust_display_pll_v3(
+	struct bios_parser *bp,
+	struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params;
+	uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10;
+
+	memset(&params, 0, sizeof(params));
+
+	/* We need to convert from KHz units into 10KHz units and then convert
+	 * output pixel clock back 10KHz-->KHz */
+	params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in);
+	params.sInput.ucTransmitterID =
+			bp->cmd_helper->encoder_id_to_atom(
+					dal_graphics_object_id_get_encoder_id(
+							bp_params->encoder_object_id));
+	params.sInput.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					bp_params->signal_type, false);
+
+	if (bp_params->ss_enable == true)
+		params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE;
+
+	if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+		params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK;
+
+	if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+		/* Convert output pixel clock back 10KHz-->KHz: multiply
+		 * original pixel clock in KHz by ratio
+		 * [output pxlClk/input pxlClk] */
+		uint64_t pixel_clk_10_khz_out =
+				(uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq);
+		uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+		if (pixel_clk_10_kHz_in != 0) {
+			bp_params->adjusted_pixel_clock =
+					div_u64(pixel_clk * pixel_clk_10_khz_out,
+							pixel_clk_10_kHz_in);
+		} else {
+			/* A zero input clock would divide by zero; flag it
+			 * and report a zero adjusted clock instead. */
+			bp_params->adjusted_pixel_clock = 0;
+			BREAK_TO_DEBUGGER();
+		}
+
+		bp_params->reference_divider = params.sOutput.ucRefDiv;
+		bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv;
+
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  DAC ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac1_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard);
+static enum bp_result dac2_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard);
+
+static void init_dac_encoder_control(struct bios_parser *bp)
+{
+	/* Each DAC encoder-control table only has a v1 implementation. */
+	bp->cmd_tbl.dac1_encoder_control =
+			(BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl) == 1) ?
+					dac1_encoder_control_v1 : NULL;
+	bp->cmd_tbl.dac2_encoder_control =
+			(BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl) == 1) ?
+					dac2_encoder_control_v1 : NULL;
+}
+
+/* Translate caller arguments into the shared DAC encoder-control ATOM
+ * parameter layout (used by both DAC1 and DAC2 paths). */
+static void dac_encoder_control_prepare_params(
+	DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard)
+{
+	params->ucAction = enable ? ATOM_ENABLE : ATOM_DISABLE;
+	params->ucDacStandard = dac_standard;
+
+	/* KHz -> 10KHz units; it looks as if the TvControl does not care
+	 * about the pixel clock. */
+	params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10));
+}
+
+/* DAC1EncoderControl revision 1: enable/disable DAC1 for the given
+ * standard and pixel clock. */
+static enum bp_result dac1_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard)
+{
+	DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+	dac_encoder_control_prepare_params(
+		&params, enable, pixel_clock, dac_standard);
+
+	return EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params) ?
+			BP_RESULT_OK : BP_RESULT_FAILURE;
+}
+
+/* DAC2EncoderControl revision 1: enable/disable DAC2 for the given
+ * standard and pixel clock. */
+static enum bp_result dac2_encoder_control_v1(
+	struct bios_parser *bp,
+	bool enable,
+	uint32_t pixel_clock,
+	uint8_t dac_standard)
+{
+	DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+	dac_encoder_control_prepare_params(
+		&params, enable, pixel_clock, dac_standard);
+
+	return EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params) ?
+			BP_RESULT_OK : BP_RESULT_FAILURE;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  DAC OUTPUT CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result dac1_output_control_v1(
+	struct bios_parser *bp,
+	bool enable);
+static enum bp_result dac2_output_control_v1(
+	struct bios_parser *bp,
+	bool enable);
+
+static void init_dac_output_control(struct bios_parser *bp)
+{
+	/* Each DAC output-control table only has a v1 implementation. */
+	bp->cmd_tbl.dac1_output_control =
+			(BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl) == 1) ?
+					dac1_output_control_v1 : NULL;
+	bp->cmd_tbl.dac2_output_control =
+			(BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl) == 1) ?
+					dac2_output_control_v1 : NULL;
+}
+
+/* DAC1OutputControl revision 1: enable or disable the DAC1 output. */
+static enum bp_result dac1_output_control_v1(
+	struct bios_parser *bp, bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	/* Zero-initialize: previously only ucAction was set and the rest of
+	 * the allocation was passed to the VBIOS as uninitialized stack
+	 * data (every other table call in this file clears its params). */
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params = {0};
+
+	if (enable)
+		params.ucAction = ATOM_ENABLE;
+	else
+		params.ucAction = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* DAC2OutputControl revision 1: enable or disable the DAC2 output. */
+static enum bp_result dac2_output_control_v1(
+	struct bios_parser *bp, bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	/* Zero-initialize: previously only ucAction was set and the rest of
+	 * the allocation was passed to the VBIOS as uninitialized stack
+	 * data (every other table call in this file clears its params). */
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params = {0};
+
+	if (enable)
+		params.ucAction = ATOM_ENABLE;
+	else
+		params.ucAction = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET CRTC TIMING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params);
+static enum bp_result set_crtc_timing_v1(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+	/* Prefer the DTD-timing table when its revision is new enough;
+	 * otherwise fall back to the legacy SetCRTC_Timing table. */
+	uint32_t dtd_version =
+			BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming);
+
+	if (dtd_version > 2) {
+		bp->cmd_tbl.set_crtc_timing =
+				(dtd_version == 3) ?
+						set_crtc_using_dtd_timing_v3 : NULL;
+	} else {
+		bp->cmd_tbl.set_crtc_timing =
+				(BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing) == 1) ?
+						set_crtc_timing_v1 : NULL;
+	}
+}
+
+/* SetCRTC_Timing revision 1: program the CRTC with explicit h/v totals,
+ * sync positions/widths and polarity/interlace flags from bp_params.
+ */
+static enum bp_result set_crtc_timing_v1(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+
+	params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total));
+	params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable));
+	params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start));
+	params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width));
+	params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total));
+	params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable));
+	params.usV_SyncStart =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_start));
+	params.usV_SyncWidth =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_width));
+
+	/* VBIOS does not expect any value except zero into this call, for
+	 * underscan use another entry ProgramOverscan call but when mode
+	 * 1776x1000 with the overscan 72x44 .e.i. 1920x1080 @30 DAL2 is ok,
+	 * but when same ,but 60 Hz there is corruption
+	 * DAL1 does not allow the mode 1776x1000@60
+	 */
+	params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right;
+	params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left;
+	params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom;
+	params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top;
+
+	/* ATOM polarity bits are active-low: set them when the positive
+	 * polarity flag is clear. */
+	if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+	if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+	if (bp_params->flags.INTERLACE) {
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+		/* original DAL code has this condition to apply this for
+		 * non-TV/CV only due to complex MV testing for possible
+		 * impact
+		 * if (pACParameters->signal != SignalType_YPbPr &&
+		 *  pACParameters->signal != SignalType_Composite &&
+		 *  pACParameters->signal != SignalType_SVideo)
+		 */
+		/* HW will deduct 0.5 line from 2nd field.
+		 * i.e. for 1080i, it is 2 lines for 1st field, 2.5
+		 * lines for the 2nd field. we need input as 5 instead
+		 * of 4, but it is 4 either from Edid data
+		 * (spec CEA 861) or CEA timing table.
+		 */
+		params.usV_SyncStart =
+				cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1));
+	}
+
+	if (bp_params->flags.HORZ_COUNT_BY_TWO)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+	if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* SetCRTC_UsingDTDTiming revision 3: program the CRTC from DTD-style
+ * parameters, i.e. addressable size plus blanking time and sync offsets
+ * measured from the end of the addressable region (converted here from
+ * the totals/sync-start values in bp_params).
+ */
+static enum bp_result set_crtc_using_dtd_timing_v3(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+
+	/* bios usH_Size wants h addressable size */
+	params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+	/* bios usH_Blanking_Time wants borders included in blanking */
+	params.usH_Blanking_Time =
+			cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable));
+	/* bios usV_Size wants v addressable size */
+	params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+	/* bios usV_Blanking_Time wants borders included in blanking */
+	params.usV_Blanking_Time =
+			cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable));
+	/* bios usHSyncOffset is the offset from the end of h addressable,
+	 * our horizontalSyncStart is the offset from the beginning
+	 * of h addressable */
+	params.usH_SyncOffset =
+			cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable));
+	params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+	/* bios usHSyncOffset is the offset from the end of v addressable,
+	 * our verticalSyncStart is the offset from the beginning of
+	 * v addressable */
+	params.usV_SyncOffset =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable));
+	params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+	/* we assume that overscan from original timing does not get bigger
+	 * than 255
+	 * we will program all the borders in the Set CRTC Overscan call below
+	 */
+
+	/* ATOM polarity bits are active-low: set them when the positive
+	 * polarity flag is clear. */
+	if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+	if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+	if (bp_params->flags.INTERLACE)	{
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+		/* original DAL code has this condition to apply this
+		 * for non-TV/CV only
+		 * due to complex MV testing for possible impact
+		 * if ( pACParameters->signal != SignalType_YPbPr &&
+		 *  pACParameters->signal != SignalType_Composite &&
+		 *  pACParameters->signal != SignalType_SVideo)
+		 */
+		{
+			/* HW will deduct 0.5 line from 2nd field.
+			 * i.e. for 1080i, it is 2 lines for 1st field,
+			 * 2.5 lines for the 2nd field. we need input as 5
+			 * instead of 4.
+			 * but it is 4 either from Edid data (spec CEA 861)
+			 * or CEA timing table.
+			 */
+			params.usV_SyncOffset =
+					cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
+
+		}
+	}
+
+	if (bp_params->flags.HORZ_COUNT_BY_TWO)
+		params.susModeMiscInfo.usAccess =
+				cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+	if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v2(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+	/* Bind the SelectCRTC_Source handler for the reported revision. */
+	uint32_t rev = BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source);
+
+	if (rev == 2)
+		bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+	else if (rev == 3)
+		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+	else
+		bp->cmd_tbl.select_crtc_source = NULL;
+}
+
+/* SelectCRTC_Source revision 2: route an encoder (engine) to a CRTC and
+ * set the encode mode.  Fails if either the controller or engine id
+ * cannot be translated to its ATOM equivalent.
+ */
+static enum bp_result select_crtc_source_v2(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
+	uint8_t atom_controller_id;
+	uint32_t atom_engine_id;
+	enum signal_type s = bp_params->signal;
+
+	memset(&params, 0, sizeof(params));
+
+	/* set controller id */
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+	else
+		return BP_RESULT_FAILURE;
+
+	/* set encoder id */
+	if (bp->cmd_helper->engine_bp_to_atom(
+			bp_params->engine_id, &atom_engine_id))
+		params.ucEncoderID = (uint8_t)atom_engine_id;
+	else
+		return BP_RESULT_FAILURE;
+
+	/* eDP (and DP driving an LVDS sink) is programmed as LVDS mode. */
+	if (SIGNAL_TYPE_EDP == s ||
+			(SIGNAL_TYPE_DISPLAY_PORT == s &&
+					SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+		s = SIGNAL_TYPE_LVDS;
+
+	params.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					s, bp_params->enable_dp_audio);
+
+	if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* SelectCRTC_Source revision 3: like v2 but additionally passes the target
+ * bit depth (ucDstBpc), needed for the VBIOS random spatial dithering
+ * feature.
+ */
+static enum bp_result select_crtc_source_v3(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params)
+{
+	/* Bug fix: 'result' was declared 'bool', truncating the enum
+	 * bp_result values, so the failure paths returned a 0/1 value
+	 * instead of the intended BP_RESULT_FAILURE constant.  Use the
+	 * proper enum type, matching select_crtc_source_v2. */
+	enum bp_result result = BP_RESULT_FAILURE;
+	SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+	uint8_t atom_controller_id;
+	uint32_t atom_engine_id;
+	enum signal_type s = bp_params->signal;
+
+	memset(&params, 0, sizeof(params));
+
+	/* set controller id */
+	if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+			&atom_controller_id))
+		params.ucCRTC = atom_controller_id;
+	else
+		return result;
+
+	/* set encoder id */
+	if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+			&atom_engine_id))
+		params.ucEncoderID = (uint8_t)atom_engine_id;
+	else
+		return result;
+
+	/* eDP (and DP driving an LVDS sink) is programmed as LVDS mode,
+	 * matching select_crtc_source_v2. */
+	if (SIGNAL_TYPE_EDP == s ||
+			(SIGNAL_TYPE_DISPLAY_PORT == s &&
+					SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+		s = SIGNAL_TYPE_LVDS;
+
+	params.ucEncodeMode =
+			(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+					s, bp_params->enable_dp_audio);
+	/* Needed for VBIOS Random Spatial Dithering feature */
+	params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+	if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE CRTC
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+/* ENABLE CRTC: turn a display controller on or off. */
+static enum bp_result enable_crtc_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable);
+
+/* Bind cmd_tbl.enable_crtc to the handler matching the VBIOS EnableCRTC
+ * parameter revision; NULL if the revision is unknown. */
+static void init_enable_crtc(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) {
+	case 1:
+		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+		break;
+	default:
+		bp->cmd_tbl.enable_crtc = NULL;
+		break;
+	}
+}
+
+/* Execute the v1 EnableCRTC BIOS command table to enable/disable the given
+ * controller. Returns BP_RESULT_BADINPUT if the controller id cannot be
+ * translated to its ATOM equivalent, BP_RESULT_FAILURE if the BIOS call
+ * fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result enable_crtc_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable)
+{
+	/* Fix: was declared 'bool', which truncated the enum bp_result
+	 * value stored in it (BP_RESULT_FAILURE read back as 1). */
+	enum bp_result result = BP_RESULT_FAILURE;
+	ENABLE_CRTC_PARAMETERS params = {0};
+	uint8_t id;
+
+	if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+		params.ucCRTC = id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	if (enable)
+		params.ucEnable = ATOM_ENABLE;
+	else
+		params.ucEnable = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE CRTC MEM REQ
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+/* ENABLE CRTC MEM REQ: gate memory requests for a display controller. */
+static enum bp_result enable_crtc_mem_req_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable);
+
+/* Bind cmd_tbl.enable_crtc_mem_req to the handler matching the VBIOS
+ * EnableCRTCMemReq parameter revision; NULL if the revision is unknown. */
+static void init_enable_crtc_mem_req(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) {
+	case 1:
+		bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1;
+		break;
+	default:
+		bp->cmd_tbl.enable_crtc_mem_req = NULL;
+		break;
+	}
+}
+
+/* Execute the v1 EnableCRTCMemReq BIOS command table to enable/disable
+ * memory requests for the given controller. Returns BP_RESULT_BADINPUT if
+ * the controller id cannot be translated, BP_RESULT_FAILURE if the BIOS
+ * call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result enable_crtc_mem_req_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable)
+{
+	/* Fix: was declared 'bool', which truncated the enum bp_result
+	 * values stored in it (BP_RESULT_FAILURE read back as 1, i.e.
+	 * BP_RESULT_BADINPUT). Use the proper enum type. */
+	enum bp_result result = BP_RESULT_BADINPUT;
+	ENABLE_CRTC_PARAMETERS params = {0};
+	uint8_t id;
+
+	if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) {
+		params.ucCRTC = id;
+
+		if (enable)
+			params.ucEnable = ATOM_ENABLE;
+		else
+			params.ucEnable = ATOM_DISABLE;
+
+		if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params))
+			result = BP_RESULT_OK;
+		else
+			result = BP_RESULT_FAILURE;
+	}
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+/* DISPLAY PLL: program the display engine clock via SetPixelClock. */
+static enum bp_result program_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result program_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
+/* Bind cmd_tbl.program_clock to the handler matching the VBIOS
+ * SetPixelClock parameter revision; NULL if the revision is unknown. */
+static void init_program_clock(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+	case 5:
+		bp->cmd_tbl.program_clock = program_clock_v5;
+		break;
+	case 6:
+		bp->cmd_tbl.program_clock = program_clock_v6;
+		break;
+	default:
+		bp->cmd_tbl.program_clock = NULL;
+		break;
+	}
+}
+
+/* Program the display engine clock through the v5 SetPixelClock table.
+ * target_pixel_clock is in KHz; the VBIOS expects 10KHz units.
+ * Returns BP_RESULT_BADINPUT for an untranslatable PLL id,
+ * BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result program_clock_v5(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params;
+	uint32_t atom_pll_id;
+
+	memset(&params, 0, sizeof(params));
+	if (!bp->cmd_helper->clock_source_id_to_atom(
+			bp_params->pll_id, &atom_pll_id)) {
+		BREAK_TO_DEBUGGER(); /* Invalid Input!! */
+		return BP_RESULT_BADINPUT;
+	}
+
+	/* We need to convert from KHz units into 10KHz units */
+	params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
+	params.sPCLKInput.usPixelClock =
+			cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10));
+	params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
+
+	if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+		params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/* Program the display engine clock through the v6 SetPixelClock table.
+ * On success, bp_params->dfs_bypass_display_clock is updated with the
+ * true display clock (in KHz) reported back by the VBIOS when DFS bypass
+ * is enabled. Returns BP_RESULT_BADINPUT for an untranslatable PLL id,
+ * BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result program_clock_v6(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params;
+	uint32_t atom_pll_id;
+
+	memset(&params, 0, sizeof(params));
+
+	if (!bp->cmd_helper->clock_source_id_to_atom(
+			bp_params->pll_id, &atom_pll_id)) {
+		BREAK_TO_DEBUGGER(); /*Invalid Input!!*/
+		return BP_RESULT_BADINPUT;
+	}
+
+	/* We need to convert from KHz units into 10KHz units */
+	params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
+	params.sPCLKInput.ulDispEngClkFreq =
+			cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+	if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+		params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+	if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
+		/* True display clock is returned by VBIOS if DFS bypass
+		 * is enabled. */
+		bp_params->dfs_bypass_display_clock =
+				(uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10);
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  EXTERNAL ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+/* EXTERNAL ENCODER CONTROL: drive an external (travel/DP) encoder chip. */
+static enum bp_result external_encoder_control_v3(
+	struct bios_parser *bp,
+	struct bp_external_encoder_control *cntl);
+
+/* Bind cmd_tbl.external_encoder_control to the handler matching the VBIOS
+ * ExternalEncoderControl parameter revision; NULL if unknown. */
+static void init_external_encoder_control(
+	struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) {
+	case 3:
+		bp->cmd_tbl.external_encoder_control =
+				external_encoder_control_v3;
+		break;
+	default:
+		bp->cmd_tbl.external_encoder_control = NULL;
+		break;
+	}
+}
+
+/* Execute the v3 ExternalEncoderControl table for the NUTMEG/TRAVIS
+ * external encoders. The parameters packed into ucConfig/usPixelClock/etc.
+ * depend on cntl->action (INIT, SETUP, ENABLE); see the per-case comments.
+ * Returns BP_RESULT_BADINPUT for any other encoder id, BP_RESULT_FAILURE
+ * if the BIOS call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result external_encoder_control_v3(
+	struct bios_parser *bp,
+	struct bp_external_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	/* we need use _PS_Alloc struct */
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params;
+	EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params;
+	struct graphics_object_id encoder;
+	bool is_input_signal_dp = false;
+
+	memset(&params, 0, sizeof(params));
+
+	cntl_params = &params.sExtEncoder;
+
+	encoder = cntl->encoder_id;
+
+	/* check if encoder supports external encoder control table */
+	switch (dal_graphics_object_id_get_encoder_id(encoder)) {
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		is_input_signal_dp = true;
+		break;
+
+	default:
+		BREAK_TO_DEBUGGER();
+		return BP_RESULT_BADINPUT;
+	}
+
+	/* Fill information based on the action
+	 *
+	 * Bit[6:4]: indicate external encoder, applied to all functions.
+	 * =0: external encoder1, mapped to external encoder enum id1
+	 * =1: external encoder2, mapped to external encoder enum id2
+	 *
+	 * enum ObjectEnumId
+	 * {
+	 *  EnumId_Unknown = 0,
+	 *  EnumId_1,
+	 *  EnumId_2,
+	 * };
+	 */
+	cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4);
+
+	switch (cntl->action) {
+	case EXTERNAL_ENCODER_CONTROL_INIT:
+		/* output display connector type. Only valid in encoder
+		 * initialization */
+		cntl_params->usConnectorId =
+				cpu_to_le16((uint16_t)cntl->connector_obj_id.id);
+		break;
+	case EXTERNAL_ENCODER_CONTROL_SETUP:
+		/* EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 pixel clock unit in
+		 * 10KHz
+		 * output display device pixel clock frequency in unit of 10KHz.
+		 * Only valid in setup and enableoutput
+		 */
+		cntl_params->usPixelClock =
+				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		/* Indicate display output signal type drive by external
+		 * encoder, only valid in setup and enableoutput */
+		cntl_params->ucEncoderMode =
+				(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+						cntl->signal, false);
+
+		if (is_input_signal_dp) {
+			/* Bit[0]: indicate link rate, =1: 2.7Ghz, =0: 1.62Ghz,
+			 * only valid in encoder setup with DP mode. */
+			if (LINK_RATE_HIGH == cntl->link_rate)
+				cntl_params->ucConfig |= 1;
+			/* output color depth Indicate encoder data bpc format
+			 * in DP mode, only valid in encoder setup in DP mode.
+			 */
+			cntl_params->ucBitPerColor =
+					(uint8_t)(cntl->color_depth);
+		}
+		/* Indicate how many lanes used by external encoder, only valid
+		 * in encoder setup and enableoutput. */
+		cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number);
+		break;
+	case EXTERNAL_ENCODER_CONTROL_ENABLE:
+		cntl_params->usPixelClock =
+				cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		cntl_params->ucEncoderMode =
+				(uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+						cntl->signal, false);
+		cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number;
+		break;
+	default:
+		break;
+	}
+
+	cntl_params->ucAction = (uint8_t)cntl->action;
+
+	if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  ENABLE DISPLAY POWER GATING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+/* ENABLE DISPLAY POWER GATING: power-gate/ungate a display pipe. */
+static enum bp_result enable_disp_power_gating_v2_1(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action);
+
+/* Bind cmd_tbl.enable_disp_power_gating to the handler matching the VBIOS
+ * EnableDispPowerGating parameter revision; NULL if unknown. */
+static void init_enable_disp_power_gating(
+	struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) {
+	case 1:
+		bp->cmd_tbl.enable_disp_power_gating =
+				enable_disp_power_gating_v2_1;
+		break;
+	default:
+		bp->cmd_tbl.enable_disp_power_gating = NULL;
+		break;
+	}
+}
+
+/* Execute the EnableDispPowerGating v2.1 BIOS table to apply the given
+ * power-gating action to the pipe owned by crtc_id.
+ * Returns BP_RESULT_BADINPUT if the controller id cannot be translated,
+ * BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result enable_disp_power_gating_v2_1(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0};
+	uint8_t atom_crtc_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+		params.ucDispPipeId = atom_crtc_id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	params.ucEnable =
+			bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+	if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ **                  SET DCE CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+/* SET DCE CLOCK: program the DCE display/DPREF clock source and rate. */
+static enum bp_result set_dce_clock_v2_1(
+	struct bios_parser *bp,
+	struct bp_set_dce_clock_parameters *bp_params);
+
+/* Bind cmd_tbl.set_dce_clock to the handler matching the VBIOS
+ * SetDCEClock parameter revision; NULL if the revision is unknown. */
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
+	case 1:
+		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+		break;
+	default:
+		bp->cmd_tbl.set_dce_clock = NULL;
+		break;
+	}
+}
+
+/* Execute the SetDCEClock v2.1 BIOS table. For DPREFCLK only reference
+ * source flags are sent (the VBIOS programs the frequency itself); for the
+ * display clock the target frequency is programmed (KHz -> 10KHz units).
+ * On success bp_params->target_clock_frequency is updated with the value
+ * the VBIOS reports back, converted to KHz.
+ * Returns BP_RESULT_BADINPUT if the PLL id or clock type cannot be
+ * translated, BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK
+ * otherwise.
+ */
+static enum bp_result set_dce_clock_v2_1(
+	struct bios_parser *bp,
+	struct bp_set_dce_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
+	uint32_t atom_pll_id;
+	uint32_t atom_clock_type;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+
+	memset(&params, 0, sizeof(params));
+
+	if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+			!cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
+		return BP_RESULT_BADINPUT;
+
+	params.asParam.ucDCEClkSrc  = atom_pll_id;
+	params.asParam.ucDCEClkType = atom_clock_type;
+
+	if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+		if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+		if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+		if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+		if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+			params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+	} else {
+		/* Only program the clock frequency if the display clock is
+		 * used; the VBIOS will program DPREFCLK itself.
+		 * We need to convert from KHz units into 10KHz units.
+		 * (Braces added: the comment lines made the single-statement
+		 * else arm easy to misread; kernel style requires braces on
+		 * a multi-line branch.)
+		 */
+		params.asParam.ulDCEClkFreq = cpu_to_le32(bp_params->target_clock_frequency / 10);
+	}
+
+	if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
+		/* Convert from 10KHz units back to KHz */
+		bp_params->target_clock_frequency = le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}

+ 102 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table.h

@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_H__
+#define __DAL_COMMAND_TABLE_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+/* Dispatch table of ATOM BIOS command-table wrappers. Each slot is bound
+ * by dal_bios_parser_init_cmd_tbl() to the handler matching the command
+ * table revision found in the VBIOS, or NULL when the revision is
+ * unsupported. Callers must check for NULL before invoking a slot. */
+struct cmd_tbl {
+	enum bp_result (*dig_encoder_control)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*encoder_control_dig1)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*encoder_control_dig2)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*transmitter_control)(
+		struct bios_parser *bp,
+		struct bp_transmitter_control *control);
+	enum bp_result (*set_pixel_clock)(
+		struct bios_parser *bp,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*enable_spread_spectrum_on_ppll)(
+		struct bios_parser *bp,
+		struct bp_spread_spectrum_parameters *bp_params,
+		bool enable);
+	enum bp_result (*adjust_display_pll)(
+		struct bios_parser *bp,
+		struct bp_adjust_pixel_clock_parameters *bp_params);
+	enum bp_result (*dac1_encoder_control)(
+		struct bios_parser *bp,
+		bool enable,
+		uint32_t pixel_clock,
+		uint8_t dac_standard);
+	enum bp_result (*dac2_encoder_control)(
+		struct bios_parser *bp,
+		bool enable,
+		uint32_t pixel_clock,
+		uint8_t dac_standard);
+	enum bp_result (*dac1_output_control)(
+		struct bios_parser *bp,
+		bool enable);
+	enum bp_result (*dac2_output_control)(
+		struct bios_parser *bp,
+		bool enable);
+	enum bp_result (*set_crtc_timing)(
+		struct bios_parser *bp,
+		struct bp_hw_crtc_timing_parameters *bp_params);
+	enum bp_result (*select_crtc_source)(
+		struct bios_parser *bp,
+		struct bp_crtc_source_select *bp_params);
+	enum bp_result (*enable_crtc)(
+		struct bios_parser *bp,
+		enum controller_id controller_id,
+		bool enable);
+	enum bp_result (*enable_crtc_mem_req)(
+		struct bios_parser *bp,
+		enum controller_id controller_id,
+		bool enable);
+	enum bp_result (*program_clock)(
+		struct bios_parser *bp,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*external_encoder_control)(
+			struct bios_parser *bp,
+			struct bp_external_encoder_control *cntl);
+	enum bp_result (*enable_disp_power_gating)(
+		struct bios_parser *bp,
+		enum controller_id crtc_id,
+		enum bp_pipe_control_action action);
+	enum bp_result (*set_dce_clock)(
+		struct bios_parser *bp,
+		struct bp_set_dce_clock_parameters *bp_params);
+};
+
+/* Populate every cmd_tbl slot based on the VBIOS command-table revisions. */
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif

+ 812 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table2.c

@@ -0,0 +1,812 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table2.h"
+#include "command_table_helper2.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal2.h"
+
+/* Index of a command/data table entry within the atomfirmware master list
+ * (each entry is a uint16_t).
+ * NOTE(review): this is the classic null-pointer offsetof idiom; formally
+ * UB in ISO C — consider offsetof() from <linux/stddef.h> instead. */
+#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
+	(((char *)(&((\
+		struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
+		->FieldName)-(char *)0)/sizeof(uint16_t))
+
+/* True when the named BIOS command table executes successfully via CGS. */
+#define EXEC_BIOS_CMD_TABLE(fname, params)\
+	(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+		GET_INDEX_INTO_MASTER_TABLE(command, fname), \
+		&params) == 0)
+
+/* Fetch format/content revisions (frev/crev) of the named command table. */
+#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
+	cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+		GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
+
+/* Content revision (crev) only; 0 when the revision query fails. */
+#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
+	bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+			GET_INDEX_INTO_MASTER_TABLE(command, fname))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+
+static void init_set_crtc_timing(struct bios_parser *bp);
+
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+static void init_get_smu_clock_info(struct bios_parser *bp);
+
+/* Populate every cmd_tbl slot (atomfirmware variant) by probing the VBIOS
+ * command-table revisions; unsupported revisions leave a slot NULL. */
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+	init_dig_encoder_control(bp);
+	init_transmitter_control(bp);
+	init_set_pixel_clock(bp);
+
+	init_set_crtc_timing(bp);
+
+	init_select_crtc_source(bp);
+	init_enable_crtc(bp);
+
+	init_external_encoder_control(bp);
+	init_enable_disp_power_gating(bp);
+	init_set_dce_clock(bp);
+	init_get_smu_clock_info(bp);
+}
+
+/* Return the content revision (crev) of the command table at @index,
+ * or 0 if the CGS revision query fails. */
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+					     uint32_t index)
+{
+	uint8_t frev, crev;
+
+	if (cgs_atom_get_cmd_table_revs(cgs_device,
+					index,
+					&frev, &crev) != 0)
+		return 0;
+	return crev;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  D I G E N C O D E R C O N T R O L
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result encoder_control_digx_v1_5(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl);
+
+/* Bind cmd_tbl.dig_encoder_control to the handler matching the VBIOS
+ * digxencodercontrol parameter revision; NULL if the revision is unknown. */
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+	uint32_t version =
+		BIOS_CMD_TABLE_PARA_REVISION(digxencodercontrol);
+
+	switch (version) {
+	case 5:
+		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
+		break;
+	default:
+		bp->cmd_tbl.dig_encoder_control = NULL;
+		break;
+	}
+}
+
+/* Execute the digxencodercontrol v1.5 BIOS table to set up a DIG encoder
+ * stream (engine, mode, lane count, bit depth). For HDMI deep color, the
+ * pixel clock is scaled by the deep-color ratio (30/24, 36/24, 48/24).
+ * Returns BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result encoder_control_digx_v1_5(
+	struct bios_parser *bp,
+	struct bp_encoder_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	struct dig_encoder_stream_setup_parameters_v1_5 params = {0};
+
+	params.digid = (uint8_t)(cntl->engine_id);
+	params.action = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+	/* VBIOS expects the pixel clock in 10KHz units */
+	params.pclk_10khz = cntl->pixel_clock / 10;
+	params.digmode =
+			(uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+					cntl->signal,
+					cntl->enable_dp_audio));
+	params.lanenum = (uint8_t)(cntl->lanes_number);
+
+	switch (cntl->color_depth) {
+	case COLOR_DEPTH_888:
+		params.bitpercolor = PANEL_8BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_101010:
+		params.bitpercolor = PANEL_10BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_121212:
+		params.bitpercolor = PANEL_12BIT_PER_COLOR;
+		break;
+	case COLOR_DEPTH_161616:
+		params.bitpercolor = PANEL_16BIT_PER_COLOR;
+		break;
+	default:
+		break;
+	}
+
+	/* HDMI deep color transmits more bits per pixel, so the effective
+	 * clock handed to the VBIOS must be scaled up from the 24bpp base */
+	if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		switch (cntl->color_depth) {
+		case COLOR_DEPTH_101010:
+			params.pclk_10khz =
+				(params.pclk_10khz * 30) / 24;
+			break;
+		case COLOR_DEPTH_121212:
+			params.pclk_10khz =
+				(params.pclk_10khz * 36) / 24;
+			break;
+		case COLOR_DEPTH_161616:
+			params.pclk_10khz =
+				(params.pclk_10khz * 48) / 24;
+			break;
+		default:
+			break;
+		}
+
+	if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/*****************************************************************************
+ ******************************************************************************
+ **
+ **                  TRANSMITTER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result transmitter_control_v1_6(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl);
+
+/* Bind cmd_tbl.transmitter_control to the handler matching the VBIOS
+ * dig1transmittercontrol content revision. Note: on a failed revision
+ * query only BREAK_TO_DEBUGGER() fires and crev is then read — the
+ * switch still runs and defaults the slot to NULL. */
+static void init_transmitter_control(struct bios_parser *bp)
+{
+	uint8_t frev;
+	uint8_t crev;
+
+	if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
+		BREAK_TO_DEBUGGER();
+	switch (crev) {
+	case 6:
+		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+		break;
+	default:
+		bp->cmd_tbl.transmitter_control = NULL;
+		break;
+	}
+}
+
+/* Execute the dig1transmittercontrol v1.6 BIOS table: program PHY id,
+ * action, lane settings/dig mode, lane count, HPD and FE selection, and
+ * the symbol clock (KHz -> 10KHz units). Color depth is no longer passed;
+ * the driver folds the deep-color factor into the PHY clock.
+ * Returns BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result transmitter_control_v1_6(
+	struct bios_parser *bp,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+	struct dig_transmitter_control_ps_allocation_v1_6 ps = { { 0 } };
+
+	ps.param.phyid = cmd->phy_id_to_atom(cntl->transmitter);
+	ps.param.action = (uint8_t)cntl->action;
+
+	/* mode_laneset is a union: lane settings for voltage/pre-emphasis
+	 * programming, dig mode for every other action */
+	if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+		ps.param.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings;
+	else
+		ps.param.mode_laneset.digmode =
+				cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+	ps.param.lanenum = (uint8_t)cntl->lanes_number;
+	ps.param.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+	ps.param.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+	ps.param.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+	ps.param.symclk_10khz = cntl->pixel_clock/10;
+
+
+	if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
+		cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
+		cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
+		dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
+		"%s:ps.param.symclk_10khz = %d\n",\
+		__func__, ps.param.symclk_10khz);
+	}
+
+
+/*color_depth not used any more, driver has deep color factor in the Phyclk*/
+	if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
+		result = BP_RESULT_OK;
+	return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  SET PIXEL CLOCK
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_pixel_clock_v7(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params);
+
+/* Bind cmd_tbl.set_pixel_clock to the handler matching the VBIOS
+ * setpixelclock parameter revision; NULL if the revision is unknown. */
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+	switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
+	case 7:
+		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+		break;
+	default:
+		bp->cmd_tbl.set_pixel_clock = NULL;
+		break;
+	}
+}
+
+
+
+/* Execute the setpixelclock v1.7 BIOS table: program the pixel clock for
+ * a CRTC/PLL/encoder triple, with misc flags for PLL forcing, YUV420,
+ * reference-source selection and dual-link DVI.
+ * Returns BP_RESULT_FAILURE if an id cannot be translated or the BIOS
+ * call fails, BP_RESULT_OK otherwise.
+ */
+static enum bp_result set_pixel_clock_v7(
+	struct bios_parser *bp,
+	struct bp_pixel_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	struct set_pixel_clock_parameter_v1_7 clk;
+	uint8_t controller_id;
+	uint32_t pll_id;
+
+	memset(&clk, 0, sizeof(clk));
+
+	if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+			&& bp->cmd_helper->controller_id_to_atom(bp_params->
+					controller_id, &controller_id)) {
+		/* Note: VBIOS still wants to use ucCRTC name which is now
+		 * 1 byte in ULONG
+		 *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+		 *{
+		 * target the pixel clock to drive the CRTC timing.
+		 * ULONG ulPixelClock:24;
+		 * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+		 * previous version.
+		 * ATOM_CRTC1~6, indicate the CRTC controller to
+		 * ULONG ucCRTC:8;
+		 * drive the pixel clock. not used for DCPLL case.
+		 *}CRTC_PIXEL_CLOCK_FREQ;
+		 *union
+		 *{
+		 * pixel clock and CRTC id frequency
+		 * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+		 * ULONG ulDispEngClkFreq; dispclk frequency
+		 *};
+		 */
+		clk.crtc_id = controller_id;
+		clk.pll_id = (uint8_t) pll_id;
+		clk.encoderobjid =
+			bp->cmd_helper->encoder_id_to_atom(
+				dal_graphics_object_id_get_encoder_id(
+					bp_params->encoder_object_id));
+
+		clk.encoder_mode = (uint8_t) bp->
+			cmd_helper->encoder_mode_bp_to_atom(
+				bp_params->signal_type, false);
+
+		/* We need to convert from KHz into 100Hz units
+		 * (the v1.7 field is pixclk_100hz, hence the * 10) */
+		clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock *
+				10);
+
+		clk.deep_color_ratio =
+			(uint8_t) bp->cmd_helper->
+				transmitter_color_depth_to_atom(
+					bp_params->color_depth);
+		dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
+				"%s:program display clock = %d"\
+				"colorDepth = %d\n", __func__,\
+				bp_params->target_pixel_clock, bp_params->color_depth);
+
+		if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+		if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+		if (bp_params->flags.SUPPORT_YUV_420)
+			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+		if (bp_params->flags.SET_XTALIN_REF_SRC)
+			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+		if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+		if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+			clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+		if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
+			result = BP_RESULT_OK;
+	}
+	return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  SET CRTC TIMING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params);
+
+/* Bind cmd_tbl.set_crtc_timing to the handler matching the VBIOS
+ * setcrtc_usingdtdtiming parameter revision; NULL if unknown. */
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+	uint32_t dtd_version =
+			BIOS_CMD_TABLE_PARA_REVISION(setcrtc_usingdtdtiming);
+
+	switch (dtd_version) {
+	case 3:
+		bp->cmd_tbl.set_crtc_timing =
+			set_crtc_using_dtd_timing_v3;
+		break;
+	default:
+		bp->cmd_tbl.set_crtc_timing = NULL;
+		break;
+	}
+}
+
+/* Execute the setcrtc_usingdtdtiming v3 BIOS table. Converts the driver's
+ * start/total-based timing into the VBIOS DTD form (addressable size,
+ * blanking, sync offsets from end of addressable) and sets polarity,
+ * interlace and pixel-repeat flags.
+ * Returns BP_RESULT_FAILURE if the BIOS call fails, BP_RESULT_OK otherwise.
+ * NOTE(review): an untranslatable controller_id only leaves crtc_id at 0
+ * rather than returning early — confirm whether that is intended.
+ */
+static enum bp_result set_crtc_using_dtd_timing_v3(
+	struct bios_parser *bp,
+	struct bp_hw_crtc_timing_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+	struct set_crtc_using_dtd_timing_parameters params = {0};
+	uint8_t atom_controller_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(
+			bp_params->controller_id, &atom_controller_id))
+		params.crtc_id = atom_controller_id;
+
+	/* bios usH_Size wants h addressable size */
+	params.h_size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+	/* bios usH_Blanking_Time wants borders included in blanking */
+	params.h_blanking_time =
+			cpu_to_le16((uint16_t)(bp_params->h_total -
+					bp_params->h_addressable));
+	/* bios usV_Size wants v addressable size */
+	params.v_size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+	/* bios usV_Blanking_Time wants borders included in blanking */
+	params.v_blanking_time =
+			cpu_to_le16((uint16_t)(bp_params->v_total -
+					bp_params->v_addressable));
+	/* bios usHSyncOffset is the offset from the end of h addressable,
+	 * our horizontalSyncStart is the offset from the beginning
+	 * of h addressable
+	 */
+	params.h_syncoffset =
+			cpu_to_le16((uint16_t)(bp_params->h_sync_start -
+					bp_params->h_addressable));
+	params.h_syncwidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+	/* bios usHSyncOffset is the offset from the end of v addressable,
+	 * our verticalSyncStart is the offset from the beginning of
+	 * v addressable
+	 */
+	params.v_syncoffset =
+			cpu_to_le16((uint16_t)(bp_params->v_sync_start -
+					bp_params->v_addressable));
+	params.v_syncwidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+	/* we assume that overscan from original timing does not get bigger
+	 * than 255
+	 * we will program all the borders in the Set CRTC Overscan call below
+	 */
+
+	if (bp_params->flags.HSYNC_POSITIVE_POLARITY == 0)
+		params.modemiscinfo =
+				cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+						ATOM_HSYNC_POLARITY);
+
+	if (bp_params->flags.VSYNC_POSITIVE_POLARITY == 0)
+		params.modemiscinfo =
+				cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+						ATOM_VSYNC_POLARITY);
+
+	if (bp_params->flags.INTERLACE)	{
+		params.modemiscinfo =
+				cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+						ATOM_INTERLACE);
+
+		/* original DAL code has this condition to apply this
+		 * for non-TV/CV only
+		 * due to complex MV testing for possible impact
+		 * if ( pACParameters->signal != SignalType_YPbPr &&
+		 *  pACParameters->signal != SignalType_Composite &&
+		 *  pACParameters->signal != SignalType_SVideo)
+		 */
+		{
+			/* HW will deduct 0.5 line from 2nd feild.
+			 * i.e. for 1080i, it is 2 lines for 1st field,
+			 * 2.5 lines for the 2nd feild. we need input as 5
+			 * instead of 4.
+			 * but it is 4 either from Edid data (spec CEA 861)
+			 * or CEA timing table.
+			 */
+			params.v_syncoffset =
+				cpu_to_le16(le16_to_cpu(params.v_syncoffset) +
+						1);
+
+		}
+	}
+
+	if (bp_params->flags.HORZ_COUNT_BY_TWO)
+		params.modemiscinfo =
+			cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+					0x100); /* ATOM_DOUBLE_CLOCK_MODE */
+
+	if (EXEC_BIOS_CMD_TABLE(setcrtc_usingdtdtiming, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  SELECT CRTC SOURCE
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+static enum bp_result select_crtc_source_v3(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+	/* Only revision 3 of SelectCRTC_Source is supported; any other
+	 * revision leaves the hook unset so callers can detect it.
+	 */
+	if (BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source) == 3)
+		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+	else
+		bp->cmd_tbl.select_crtc_source = NULL;
+}
+
+
+/* Route a display controller (CRTC) to an encoder through the
+ * SelectCRTC_Source v3 ATOM table.
+ * Returns BP_RESULT_FAILURE when either id translation or the table
+ * call fails, BP_RESULT_OK on success.
+ */
+static enum bp_result select_crtc_source_v3(
+	struct bios_parser *bp,
+	struct bp_crtc_source_select *bp_params)
+{
+	/* Fix: 'result' was declared bool but stores and returns an
+	 * enum bp_result value.
+	 */
+	enum bp_result result = BP_RESULT_FAILURE;
+	struct select_crtc_source_parameters_v2_3 params;
+	uint8_t atom_controller_id;
+	uint32_t atom_engine_id;
+	enum signal_type s = bp_params->signal;
+
+	memset(&params, 0, sizeof(params));
+
+	if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+			&atom_controller_id))
+		params.crtc_id = atom_controller_id;
+	else
+		return result;
+
+	if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+			&atom_engine_id))
+		params.encoder_id = (uint8_t)atom_engine_id;
+	else
+		return result;
+
+	/* eDP (and DP driving an LVDS sink) is programmed as LVDS */
+	if (s == SIGNAL_TYPE_EDP ||
+		(s == SIGNAL_TYPE_DISPLAY_PORT && bp_params->sink_signal ==
+							SIGNAL_TYPE_LVDS))
+		s = SIGNAL_TYPE_LVDS;
+
+	params.encode_mode =
+			bp->cmd_helper->encoder_mode_bp_to_atom(
+					s, bp_params->enable_dp_audio);
+	/* Needed for VBIOS Random Spatial Dithering feature */
+	params.dst_bpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+	if (EXEC_BIOS_CMD_TABLE(selectcrtc_source, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  ENABLE CRTC
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+	/* Only revision 1 of EnableCRTC is supported; otherwise leave the
+	 * hook unset so the missing capability is visible.
+	 */
+	if (BIOS_CMD_TABLE_PARA_REVISION(enablecrtc) == 1)
+		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+	else
+		bp->cmd_tbl.enable_crtc = NULL;
+}
+
+/* Enable or disable a CRTC via the EnableCRTC v1 ATOM table.
+ * Returns BP_RESULT_BADINPUT for an unknown controller id,
+ * BP_RESULT_OK on success, BP_RESULT_FAILURE if the table call fails.
+ */
+static enum bp_result enable_crtc_v1(
+	struct bios_parser *bp,
+	enum controller_id controller_id,
+	bool enable)
+{
+	/* Fix: 'result' was declared bool but stores and returns an
+	 * enum bp_result value.
+	 */
+	enum bp_result result = BP_RESULT_FAILURE;
+	struct enable_crtc_parameters params = {0};
+	uint8_t id;
+
+	if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+		params.crtc_id = id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	if (enable)
+		params.enable = ATOM_ENABLE;
+	else
+		params.enable = ATOM_DISABLE;
+
+	if (EXEC_BIOS_CMD_TABLE(enablecrtc, params))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  DISPLAY PLL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  EXTERNAL ENCODER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+	struct bios_parser *bp,
+	struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+	struct bios_parser *bp)
+{
+	/* Only revision 3 of ExternalEncoderControl is supported. */
+	if (BIOS_CMD_TABLE_PARA_REVISION(externalencodercontrol) == 3)
+		bp->cmd_tbl.external_encoder_control =
+				external_encoder_control_v3;
+	else
+		bp->cmd_tbl.external_encoder_control = NULL;
+}
+
+/* Stub: external encoder control v3 is not implemented yet (see TODO);
+ * it currently reports success without touching the hardware.
+ */
+static enum bp_result external_encoder_control_v3(
+	struct bios_parser *bp,
+	struct bp_external_encoder_control *cntl)
+{
+	/* TODO */
+	return BP_RESULT_OK;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  ENABLE DISPLAY POWER GATING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+	struct bios_parser *bp)
+{
+	/* Table revision 1 maps to the v2.1 implementation. */
+	if (BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating) == 1)
+		bp->cmd_tbl.enable_disp_power_gating =
+				enable_disp_power_gating_v2_1;
+	else
+		bp->cmd_tbl.enable_disp_power_gating = NULL;
+}
+
+/* Gate or ungate a display pipe's power via EnableDispPowerGating v2.1.
+ * @crtc_id: controller to act on (translated to an ATOM CRTC id)
+ * @action:  power-gating action, translated by the command-table helper
+ * Returns BP_RESULT_BADINPUT for an unknown controller id,
+ * BP_RESULT_OK on success, BP_RESULT_FAILURE if the table call fails.
+ */
+static enum bp_result enable_disp_power_gating_v2_1(
+	struct bios_parser *bp,
+	enum controller_id crtc_id,
+	enum bp_pipe_control_action action)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+
+	struct enable_disp_power_gating_ps_allocation ps = { { 0 } };
+	uint8_t atom_crtc_id;
+
+	if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+		ps.param.disp_pipe_id = atom_crtc_id;
+	else
+		return BP_RESULT_BADINPUT;
+
+	ps.param.enable =
+		bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+	if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
+		result = BP_RESULT_OK;
+
+	return result;
+}
+
+/******************************************************************************
+*******************************************************************************
+ **
+ **                  SET DCE CLOCK
+ **
+*******************************************************************************
+*******************************************************************************/
+
+static enum bp_result set_dce_clock_v2_1(
+	struct bios_parser *bp,
+	struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+	/* Table revision 1 maps to the v2.1 implementation. */
+	if (BIOS_CMD_TABLE_PARA_REVISION(setdceclock) == 1)
+		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+	else
+		bp->cmd_tbl.set_dce_clock = NULL;
+}
+
+/* Program a display engine clock (display clock or DPREFCLK) via the
+ * SetDCEClock v2.1 ATOM table.
+ *
+ * For DPREFCLK only the reference-clock source flags are set and the
+ * VBIOS picks the frequency; for display clock the requested frequency
+ * is passed down (converted from KHz into 10KHz units).  On success the
+ * frequency reported back by the VBIOS is written to
+ * bp_params->target_clock_frequency in KHz.
+ */
+static enum bp_result set_dce_clock_v2_1(
+	struct bios_parser *bp,
+	struct bp_set_dce_clock_parameters *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+
+	struct set_dce_clock_ps_allocation_v2_1 params;
+	uint32_t atom_pll_id;
+	uint32_t atom_clock_type;
+	const struct command_table_helper *cmd = bp->cmd_helper;
+
+	memset(&params, 0, sizeof(params));
+
+	if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+			!cmd->dc_clock_type_to_atom(bp_params->clock_type,
+					&atom_clock_type))
+		return BP_RESULT_BADINPUT;
+
+	params.param.dceclksrc  = atom_pll_id;
+	params.param.dceclktype = atom_clock_type;
+
+	if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+		if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+			params.param.dceclkflag |=
+					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+		if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+			params.param.dceclkflag |=
+					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+		if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+			params.param.dceclkflag |=
+					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+		if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+			params.param.dceclkflag |=
+					DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+	} else {
+		/* only program clock frequency if display clock is used;
+		 * VBIOS will program DPREFCLK
+		 * We need to convert from KHz units into 10KHz units
+		 */
+		params.param.dceclk_10khz = cpu_to_le32(
+				bp_params->target_clock_frequency / 10);
+	}
+
+	/* Fix: the two format-string halves were concatenated without a
+	 * separator, producing "...= <freq>clock_type = ..." in the log;
+	 * also dropped the stray line-continuation backslashes.
+	 */
+	dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+			"%s: target_clock_frequency = %d, clock_type = %d\n",
+			__func__,
+			bp_params->target_clock_frequency,
+			bp_params->clock_type);
+
+	if (EXEC_BIOS_CMD_TABLE(setdceclock, params)) {
+		/* Convert from 10KHz units back to KHz */
+		bp_params->target_clock_frequency = le32_to_cpu(
+				params.param.dceclk_10khz) * 10;
+		result = BP_RESULT_OK;
+	}
+
+	return result;
+}
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  GET SMU CLOCK INFO
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+
+/* Install the GetSMUClockInfo handler. */
+static void init_get_smu_clock_info(struct bios_parser *bp)
+{
+	/* TODO add switch for table version */
+	bp->cmd_tbl.get_smu_clock_info = get_smu_clock_info_v3_1;
+
+}
+
+/* Query the SMU for the system PLL VCO frequency (10KHz units) via the
+ * GetSMUClockInfo v3.1 table.  Returns 0 if the table call fails.
+ */
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+{
+	struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
+	struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
+
+	smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+
+	/* Get Specific Clock */
+	if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
+		/* The command table writes its output back into the input
+		 * buffer; copy it out.  NOTE(review): the copy is sized by
+		 * the *input* struct -- confirm the output struct is not
+		 * larger, or part of smu_output stays uninitialized.
+		 */
+		memmove(&smu_output, &smu_input, sizeof(
+			struct atom_get_smu_clock_info_parameters_v3_1));
+		return smu_output.atom_smu_outputclkfreq.syspllvcofreq_10khz;
+	}
+
+	return 0;
+}
+

+ 105 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table2.h

@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE2_H__
+#define __DAL_COMMAND_TABLE2_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+/* Dispatch table of ATOM BIOS command-table entry points.  Each pointer
+ * is filled in by a per-table init_* routine, or left NULL when the
+ * VBIOS table revision is unsupported.
+ */
+struct cmd_tbl {
+	enum bp_result (*dig_encoder_control)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*encoder_control_dig1)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*encoder_control_dig2)(
+		struct bios_parser *bp,
+		struct bp_encoder_control *control);
+	enum bp_result (*transmitter_control)(
+		struct bios_parser *bp,
+		struct bp_transmitter_control *control);
+	enum bp_result (*set_pixel_clock)(
+		struct bios_parser *bp,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*enable_spread_spectrum_on_ppll)(
+		struct bios_parser *bp,
+		struct bp_spread_spectrum_parameters *bp_params,
+		bool enable);
+	enum bp_result (*adjust_display_pll)(
+		struct bios_parser *bp,
+		struct bp_adjust_pixel_clock_parameters *bp_params);
+	enum bp_result (*dac1_encoder_control)(
+		struct bios_parser *bp,
+		bool enable,
+		uint32_t pixel_clock,
+		uint8_t dac_standard);
+	enum bp_result (*dac2_encoder_control)(
+		struct bios_parser *bp,
+		bool enable,
+		uint32_t pixel_clock,
+		uint8_t dac_standard);
+	enum bp_result (*dac1_output_control)(
+		struct bios_parser *bp,
+		bool enable);
+	enum bp_result (*dac2_output_control)(
+		struct bios_parser *bp,
+		bool enable);
+	enum bp_result (*set_crtc_timing)(
+		struct bios_parser *bp,
+		struct bp_hw_crtc_timing_parameters *bp_params);
+	enum bp_result (*select_crtc_source)(
+		struct bios_parser *bp,
+		struct bp_crtc_source_select *bp_params);
+	enum bp_result (*enable_crtc)(
+		struct bios_parser *bp,
+		enum controller_id controller_id,
+		bool enable);
+	enum bp_result (*enable_crtc_mem_req)(
+		struct bios_parser *bp,
+		enum controller_id controller_id,
+		bool enable);
+	enum bp_result (*program_clock)(
+		struct bios_parser *bp,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*external_encoder_control)(
+			struct bios_parser *bp,
+			struct bp_external_encoder_control *cntl);
+	enum bp_result (*enable_disp_power_gating)(
+		struct bios_parser *bp,
+		enum controller_id crtc_id,
+		enum bp_pipe_control_action action);
+	enum bp_result (*set_dce_clock)(
+		struct bios_parser *bp,
+		struct bp_set_dce_clock_parameters *bp_params);
+	unsigned int (*get_smu_clock_info)(
+			struct bios_parser *bp);
+
+};
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif

+ 290 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c

@@ -0,0 +1,290 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper.h"
+
+/* Pick the per-ASIC command-table helper for the given DCE version.
+ * Returns false (and trips the debugger) for unsupported versions.
+ */
+bool dal_bios_parser_init_cmd_tbl_helper(
+	const struct command_table_helper **h,
+	enum dce_version dce)
+{
+	switch (dce) {
+	case DCE_VERSION_8_0:
+	case DCE_VERSION_8_1:
+	case DCE_VERSION_8_3:
+		*h = dal_cmd_tbl_helper_dce80_get_table();
+		return true;
+
+	case DCE_VERSION_10_0:
+	case DCE_VERSION_11_0:
+		*h = dal_cmd_tbl_helper_dce110_get_table();
+		return true;
+
+	case DCE_VERSION_11_2:
+		*h = dal_cmd_tbl_helper_dce112_get_table();
+		return true;
+
+	default:
+		/* Unsupported DCE */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+/* real implementations */
+
+/* Translate a driver controller id into the matching ATOM CRTC id.
+ * Returns false (and trips the debugger) on a NULL output pointer or an
+ * unknown controller id.
+ */
+bool dal_cmd_table_helper_controller_id_to_atom(
+	enum controller_id id,
+	uint8_t *atom_id)
+{
+	uint8_t crtc;
+
+	if (atom_id == NULL) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	switch (id) {
+	case CONTROLLER_ID_D0:
+		crtc = ATOM_CRTC1;
+		break;
+	case CONTROLLER_ID_D1:
+		crtc = ATOM_CRTC2;
+		break;
+	case CONTROLLER_ID_D2:
+		crtc = ATOM_CRTC3;
+		break;
+	case CONTROLLER_ID_D3:
+		crtc = ATOM_CRTC4;
+		break;
+	case CONTROLLER_ID_D4:
+		crtc = ATOM_CRTC5;
+		break;
+	case CONTROLLER_ID_D5:
+		crtc = ATOM_CRTC6;
+		break;
+	case CONTROLLER_ID_UNDERLAY0:
+		crtc = ATOM_UNDERLAY_PIPE0;
+		break;
+	case CONTROLLER_ID_UNDEFINED:
+		crtc = ATOM_CRTC_INVALID;
+		break;
+	default:
+		/* Wrong controller id */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	*atom_id = crtc;
+	return true;
+}
+
+/*
+ * dal_cmd_table_helper_transmitter_bp_to_atom
+ *
+ * @brief
+ *  Translate the transmitter to the corresponding ATOM BIOS value
+ *
+ * @param
+ *   input: transmitter
+ *   output: digital transmitter
+ *    // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ *    // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ *    // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
+/* Map a transmitter to its UNIPHY block index for ATOM:
+ * linkA/B (and Travis LCD) -> 0, linkC/D -> 1, linkE/F -> 2.
+ * Unknown transmitters trip the debugger and map to 0.
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+	enum transmitter t)
+{
+	if (t == TRANSMITTER_UNIPHY_A || t == TRANSMITTER_UNIPHY_B ||
+		t == TRANSMITTER_TRAVIS_LCD)
+		return 0;
+
+	if (t == TRANSMITTER_UNIPHY_C || t == TRANSMITTER_UNIPHY_D)
+		return 1;
+
+	if (t == TRANSMITTER_UNIPHY_E || t == TRANSMITTER_UNIPHY_F)
+		return 2;
+
+	/* Invalid Transmitter Type! */
+	BREAK_TO_DEBUGGER();
+	return 0;
+}
+
+/* Translate a driver signal type into an ATOM encoder mode.
+ * DP-family signals select DP or DP+audio; unknown signal types fall
+ * back to CRT mode.
+ */
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+	enum signal_type s,
+	bool enable_dp_audio)
+{
+	switch (s) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		return ATOM_ENCODER_MODE_DVI;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		return ATOM_ENCODER_MODE_HDMI;
+	case SIGNAL_TYPE_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_VIRTUAL:
+		return enable_dp_audio ?
+			ATOM_ENCODER_MODE_DP_AUDIO : ATOM_ENCODER_MODE_DP;
+	case SIGNAL_TYPE_RGB:
+	default:
+		return ATOM_ENCODER_MODE_CRT;
+	}
+}
+
+/* Fill a DIG_ENCODER_CONTROL_PARAMETERS_V2 block from a bp_encoder_control
+ * request: link select, transmitter select, action, pixel clock (converted
+ * from KHz to 10KHz units), encoder mode and lane count.
+ */
+void dal_cmd_table_helper_assign_control_parameter(
+	const struct command_table_helper *h,
+	struct bp_encoder_control *control,
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
+{
+	/* there are three transmitter blocks, each one has two links 4-lanes
+	 * each, A+B, C+D, E+F, Uniphy A, C and E are enumerated as link 0 in
+	 * each transmitter block B, D and F as link 1, third transmitter block
+	 * has non splitable links (UniphyE and UniphyF can not be configured
+	 * separately to drive two different streams)
+	 */
+	if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
+		(control->transmitter == TRANSMITTER_UNIPHY_D) ||
+		(control->transmitter == TRANSMITTER_UNIPHY_F)) {
+		/* Bit2: Link Select
+		 * =0: PHY linkA/C/E
+		 * =1: PHY linkB/D/F
+		 */
+		ctrl_param->acConfig.ucLinkSel = 1;
+	}
+
+	/* Bit[4:3]: Transmitter Selection
+	 * =00: Digital Transmitter1 ( UNIPHY linkAB )
+	 * =01: Digital Transmitter2 ( UNIPHY linkCD )
+	 * =02: Digital Transmitter3 ( UNIPHY linkEF )
+	 * =03: Reserved
+	 */
+	ctrl_param->acConfig.ucTransmitterSel =
+		(uint8_t)(h->transmitter_bp_to_atom(control->transmitter));
+
+	/* We need to convert from KHz units into 10KHz units */
+	ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
+	ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
+	ctrl_param->ucEncoderMode =
+		(uint8_t)(h->encoder_mode_bp_to_atom(
+			control->signal, control->enable_dp_audio));
+	ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
+}
+
+/* Translate a clock source id to the ATOM encoder reference-clock
+ * source id.  Returns false (and trips the debugger) on a NULL output
+ * pointer or an unsupported clock source.
+ */
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+	enum clock_source_id id,
+	uint32_t *ref_clk_src_id)
+{
+	uint32_t src;
+
+	if (ref_clk_src_id == NULL) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL1:
+		src = ENCODER_REFCLK_SRC_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		src = ENCODER_REFCLK_SRC_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_DCPLL:
+		src = ENCODER_REFCLK_SRC_DCPLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		src = ENCODER_REFCLK_SRC_EXTCLK;
+		break;
+	case CLOCK_SOURCE_ID_UNDEFINED:
+		src = ENCODER_REFCLK_SRC_INVALID;
+		break;
+	default:
+		/* Unsupported clock source id */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	*ref_clk_src_id = src;
+	return true;
+}
+
+/* Map a driver encoder_id to the corresponding ATOM ENCODER_OBJECT_ID.
+ * ENCODER_ID_UNKNOWN maps silently to NONE; any other unrecognized id
+ * trips the debugger and also maps to NONE.
+ */
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+	enum encoder_id id)
+{
+	switch (id) {
+	case ENCODER_ID_INTERNAL_LVDS:
+		return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+	case ENCODER_ID_INTERNAL_TMDS1:
+		return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+	case ENCODER_ID_INTERNAL_TMDS2:
+		return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+	case ENCODER_ID_INTERNAL_DAC1:
+		return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+	case ENCODER_ID_INTERNAL_DAC2:
+		return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+	case ENCODER_ID_INTERNAL_LVTM1:
+		return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+	case ENCODER_ID_INTERNAL_HDMI:
+		return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		return ENCODER_OBJECT_ID_TRAVIS;
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+		return ENCODER_OBJECT_ID_NUTMEG;
+	case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+	case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+	case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+	case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+		return ENCODER_OBJECT_ID_MVPU_FPGA;
+	case ENCODER_ID_INTERNAL_DDI:
+		return ENCODER_OBJECT_ID_INTERNAL_DDI;
+	case ENCODER_ID_INTERNAL_UNIPHY:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+	case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+	case ENCODER_ID_INTERNAL_UNIPHY1:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+	case ENCODER_ID_INTERNAL_UNIPHY2:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+	case ENCODER_ID_INTERNAL_UNIPHY3:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+	case ENCODER_ID_INTERNAL_WIRELESS:
+		return ENCODER_OBJECT_ID_INTERNAL_VCE;
+	case ENCODER_ID_UNKNOWN:
+		return ENCODER_OBJECT_ID_NONE;
+	default:
+		/* Invalid encoder id */
+		BREAK_TO_DEBUGGER();
+		return ENCODER_OBJECT_ID_NONE;
+	}
+}

+ 90 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h

@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_H__
+#define __DAL_COMMAND_TABLE_HELPER_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper_dce112.h"
+
+/* Per-ASIC translation callbacks used by the command-table code to map
+ * driver enums (controller, transmitter, clock source, ...) onto the
+ * values the ATOM BIOS tables expect.
+ */
+struct command_table_helper {
+	bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+	uint8_t (*encoder_action_to_atom)(
+			enum bp_encoder_control_action action);
+	uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+			bool enable_dp_audio);
+	bool (*engine_bp_to_atom)(enum engine_id engine_id,
+			uint32_t *atom_engine_id);
+	void (*assign_control_parameter)(
+			const struct command_table_helper *h,
+			struct bp_encoder_control *control,
+			DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+	bool (*clock_source_id_to_atom)(enum clock_source_id id,
+			uint32_t *atom_pll_id);
+	bool (*clock_source_id_to_ref_clk_src)(
+			enum clock_source_id id,
+			uint32_t *ref_clk_src_id);
+	uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+	uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+	uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+			enum clock_source_id id);
+	uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+	uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+	uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+	uint8_t (*phy_id_to_atom)(enum transmitter t);
+	uint8_t (*disp_power_gating_action_to_atom)(
+			enum bp_pipe_control_action action);
+	bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+			uint32_t *atom_clock_type);
+	/* Fix: was indented with spaces and exceeded 80 columns. */
+	uint8_t (*transmitter_color_depth_to_atom)(
+			enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
+	enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+	enum controller_id id,
+	uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+	enum signal_type s,
+	bool enable_dp_audio);
+
+void dal_cmd_table_helper_assign_control_parameter(
+	const struct command_table_helper *h,
+	struct bp_encoder_control *control,
+DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+	enum clock_source_id id,
+	uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+	enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+	enum encoder_id id);
+#endif

+ 265 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c

@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper2.h"
+
+/* Pick the per-ASIC command-table helper (atomfirmware flavor) for the
+ * given DCE/DCN version.  Returns false (and trips the debugger) for
+ * unsupported versions.
+ */
+bool dal_bios_parser_init_cmd_tbl_helper2(
+	const struct command_table_helper **h,
+	enum dce_version dce)
+{
+	switch (dce) {
+	case DCE_VERSION_8_0:
+	case DCE_VERSION_8_1:
+	case DCE_VERSION_8_3:
+		*h = dal_cmd_tbl_helper_dce80_get_table();
+		return true;
+
+	case DCE_VERSION_10_0:
+	case DCE_VERSION_11_0:
+		*h = dal_cmd_tbl_helper_dce110_get_table();
+		return true;
+
+	case DCE_VERSION_11_2:
+	case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	case DCN_VERSION_1_0:
+#endif
+		*h = dal_cmd_tbl_helper_dce112_get_table2();
+		return true;
+
+	default:
+		/* Unsupported DCE */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+/* real implementations */
+
+/* Translate a driver controller id into the matching ATOM CRTC id
+ * (atomfirmware flavor).  Returns false (and trips the debugger) on a
+ * NULL output pointer or an unknown controller id.  Underlay pipes are
+ * not supported yet (see TODO below).
+ */
+bool dal_cmd_table_helper_controller_id_to_atom2(
+	enum controller_id id,
+	uint8_t *atom_id)
+{
+	if (atom_id == NULL) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	switch (id) {
+	case CONTROLLER_ID_D0:
+		*atom_id = ATOM_CRTC1;
+		return true;
+	case CONTROLLER_ID_D1:
+		*atom_id = ATOM_CRTC2;
+		return true;
+	case CONTROLLER_ID_D2:
+		*atom_id = ATOM_CRTC3;
+		return true;
+	case CONTROLLER_ID_D3:
+		*atom_id = ATOM_CRTC4;
+		return true;
+	case CONTROLLER_ID_D4:
+		*atom_id = ATOM_CRTC5;
+		return true;
+	case CONTROLLER_ID_D5:
+		*atom_id = ATOM_CRTC6;
+		return true;
+	/* TODO :case CONTROLLER_ID_UNDERLAY0:
+		*atom_id = ATOM_UNDERLAY_PIPE0;
+		return true;
+	*/
+	case CONTROLLER_ID_UNDEFINED:
+		*atom_id = ATOM_CRTC_INVALID;
+		return true;
+	default:
+		/* Wrong controller id */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+/*
+ * dal_cmd_table_helper_transmitter_bp_to_atom2
+ *
+ * @brief
+ *  Translate the transmitter to the corresponding ATOM BIOS value
+ *
+ * @param
+ *   input: transmitter
+ *   output: digital transmitter
+ *    // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ *    // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ *    // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
+/* Map a transmitter to its UNIPHY block index for ATOM:
+ * linkA/B (and Travis LCD) -> 0, linkC/D -> 1, linkE/F -> 2.
+ * Unknown transmitters trip the debugger and map to 0.
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+	enum transmitter t)
+{
+	if (t == TRANSMITTER_UNIPHY_A || t == TRANSMITTER_UNIPHY_B ||
+		t == TRANSMITTER_TRAVIS_LCD)
+		return 0;
+
+	if (t == TRANSMITTER_UNIPHY_C || t == TRANSMITTER_UNIPHY_D)
+		return 1;
+
+	if (t == TRANSMITTER_UNIPHY_E || t == TRANSMITTER_UNIPHY_F)
+		return 2;
+
+	/* Invalid Transmitter Type! */
+	BREAK_TO_DEBUGGER();
+	return 0;
+}
+
+/* Translate a driver signal type into an ATOM encoder mode.
+ * DP-family signals select DP or DP+audio; unknown signal types fall
+ * back to CRT mode.
+ */
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+	enum signal_type s,
+	bool enable_dp_audio)
+{
+	switch (s) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		return ATOM_ENCODER_MODE_DVI;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		return ATOM_ENCODER_MODE_HDMI;
+	case SIGNAL_TYPE_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_VIRTUAL:
+		return enable_dp_audio ?
+			ATOM_ENCODER_MODE_DP_AUDIO : ATOM_ENCODER_MODE_DP;
+	case SIGNAL_TYPE_RGB:
+	default:
+		return ATOM_ENCODER_MODE_CRT;
+	}
+}
+
+/* Translate a clock source id to the ATOM encoder reference-clock source
+ * id (atomfirmware flavor).  DCPLL is not supported yet (see TODO).
+ * Returns false (and trips the debugger) on a NULL output pointer or an
+ * unsupported clock source.
+ */
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+	enum clock_source_id id,
+	uint32_t *ref_clk_src_id)
+{
+	if (ref_clk_src_id == NULL) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL1:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+		return true;
+	case CLOCK_SOURCE_ID_PLL2:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+		return true;
+	/*TODO:case CLOCK_SOURCE_ID_DCPLL:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+		return true;
+	*/
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+		return true;
+	case CLOCK_SOURCE_ID_UNDEFINED:
+		*ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+		return true;
+	default:
+		/* Unsupported clock source id */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+}
+
+/* Map a driver encoder_id to the corresponding ATOM ENCODER_OBJECT_ID
+ * (atomfirmware flavor).  Virtual and unknown encoders map silently to
+ * NONE; any other unrecognized id trips the debugger and maps to NONE.
+ */
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+	enum encoder_id id)
+{
+	switch (id) {
+	case ENCODER_ID_INTERNAL_LVDS:
+		return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+	case ENCODER_ID_INTERNAL_TMDS1:
+		return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+	case ENCODER_ID_INTERNAL_TMDS2:
+		return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+	case ENCODER_ID_INTERNAL_DAC1:
+		return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+	case ENCODER_ID_INTERNAL_DAC2:
+		return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+	case ENCODER_ID_INTERNAL_LVTM1:
+		return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+	case ENCODER_ID_INTERNAL_HDMI:
+		return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		return ENCODER_OBJECT_ID_TRAVIS;
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+		return ENCODER_OBJECT_ID_NUTMEG;
+	case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+	case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+	case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+	case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+		return ENCODER_OBJECT_ID_MVPU_FPGA;
+	case ENCODER_ID_INTERNAL_DDI:
+		return ENCODER_OBJECT_ID_INTERNAL_DDI;
+	case ENCODER_ID_INTERNAL_UNIPHY:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+	case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+		return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+	case ENCODER_ID_INTERNAL_UNIPHY1:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+	case ENCODER_ID_INTERNAL_UNIPHY2:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+	case ENCODER_ID_INTERNAL_UNIPHY3:
+		return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+	case ENCODER_ID_INTERNAL_WIRELESS:
+		return ENCODER_OBJECT_ID_INTERNAL_VCE;
+	case ENCODER_ID_INTERNAL_VIRTUAL:
+		return ENCODER_OBJECT_ID_NONE;
+	case ENCODER_ID_UNKNOWN:
+		return ENCODER_OBJECT_ID_NONE;
+	default:
+		/* Invalid encoder id */
+		BREAK_TO_DEBUGGER();
+		return ENCODER_OBJECT_ID_NONE;
+	}
+}

+ 82 - 0
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h

@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_H__
+#define __DAL_COMMAND_TABLE_HELPER2_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper2_dce112.h"
+
+/* Per-ASIC table of translation callbacks that convert driver enums
+ * into the encodings the AtomBIOS command tables expect.  Hooks a
+ * given DCE generation does not need may be left NULL in its table.
+ */
+struct command_table_helper {
+	bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+	uint8_t (*encoder_action_to_atom)(
+			enum bp_encoder_control_action action);
+	uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+			bool enable_dp_audio);
+	bool (*engine_bp_to_atom)(enum engine_id engine_id,
+			uint32_t *atom_engine_id);
+	bool (*clock_source_id_to_atom)(enum clock_source_id id,
+			uint32_t *atom_pll_id);
+	bool (*clock_source_id_to_ref_clk_src)(
+			enum clock_source_id id,
+			uint32_t *ref_clk_src_id);
+	uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+	uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+	uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+			enum clock_source_id id);
+	uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+	uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+	uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+	uint8_t (*phy_id_to_atom)(enum transmitter t);
+	uint8_t (*disp_power_gating_action_to_atom)(
+			enum bp_pipe_control_action action);
+	bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+			uint32_t *atom_clock_type);
+	uint8_t (*transmitter_color_depth_to_atom)(
+			enum transmitter_color_depth id);
+};
+
+/* Pick the helper table matching the given DCE version; false if none. */
+bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h,
+	enum dce_version dce);
+
+/* Shared default translators usable by the per-ASIC tables above. */
+bool dal_cmd_table_helper_controller_id_to_atom2(
+	enum controller_id id,
+	uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+	enum signal_type s,
+	bool enable_dp_audio);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+	enum clock_source_id id,
+	uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+	enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+	enum encoder_id id);
+#endif

+ 364 - 0
drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c

@@ -0,0 +1,364 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+/* Transmitter -> ATOM_PHY_ID_*; unknown transmitters fall back to UNIPHYA. */
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+/* Signal type -> ATOM_TRANSMITTER_DIGMODE_V5_*; unrecognised signals
+ * default to DVI mode.
+ */
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+		break;
+	case SIGNAL_TYPE_LVDS:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+/* Clock source -> transmitter-control PLL select.  The
+ * ATOM_TRANSMITTER_CONFIG_V5_* constants appear to be pre-shifted
+ * register values; the >> 2 moves them down to the raw field —
+ * TODO(review): confirm the field offset against atombios.h.
+ */
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+/* HPD source -> transmitter-control HPD select.  The V5 HPDx_SEL
+ * constants appear pre-shifted; >> 4 moves the field down to bit 0 —
+ * TODO(review): confirm offset against atombios.h.  Unknown sources
+ * select 0 (HPD disabled).
+ */
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel >> 4;
+}
+
+/* NOTE(review): the switch computes atom_dig_encoder_sel but the
+ * function unconditionally returns 0, so the selection is dead code.
+ * Presumably the DIG front-end select is not programmed through this
+ * path on DCE11 — confirm with the hw team before changing the return.
+ */
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+	uint8_t atom_dig_encoder_sel = 0;
+
+	switch (id) {
+	case ENGINE_ID_DIGA:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	case ENGINE_ID_DIGB:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+		break;
+	case ENGINE_ID_DIGC:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+		break;
+	case ENGINE_ID_DIGD:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+		break;
+	case ENGINE_ID_DIGE:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+		break;
+	case ENGINE_ID_DIGF:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+		break;
+	case ENGINE_ID_DIGG:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+		break;
+	case ENGINE_ID_UNKNOWN:
+		 /* No DIG_FRONT is associated to DIG_BACKEND */
+		atom_dig_encoder_sel = 0;
+		break;
+	default:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	}
+
+	return 0;
+}
+
+/* Clock source -> ATOM_PPLL* id, written through atom_pll_id.
+ * Returns false for CLOCK_SOURCE_ID_UNDEFINED or unrecognised ids;
+ * note it also returns true when atom_pll_id is NULL (nothing written).
+ */
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_ID_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_PLL1:
+			*atom_pll_id = ATOM_PPLL1;
+			break;
+		case CLOCK_SOURCE_ID_PLL2:
+			*atom_pll_id = ATOM_PPLL2;
+			break;
+		case CLOCK_SOURCE_ID_EXTERNAL:
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_EXT_PLL1;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			/* for VCE encoding,
+			 * we need to pass in ATOM_PPLL_INVALID
+			 */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			/* When programming DP DTO PLL ID should be invalid */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			/* Should not happen */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+/* DIG/DAC engine -> ASIC_INT_*_ENCODER_ID, written through
+ * atom_engine_id.  Returns false for unsupported engines or a NULL
+ * out-pointer.
+ */
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+/* Encoder-control action -> ATOM encoder command; traps the debugger
+ * (and returns 0) for unhandled actions.
+ */
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+		break;
+	}
+
+	return atom_action;
+}
+
+/* Pipe power-gating action -> ATOM_ENABLE/DISABLE/INIT; asserts (and
+ * returns 0) for unhandled actions.
+ */
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
+/* function table: DCE11 bindings; hooks this generation does not
+ * need are left NULL, shared translators come from the common helper.
+ */
+static const struct command_table_helper command_table_helper_funcs = {
+	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+	.encoder_action_to_atom = encoder_action_to_atom,
+	.engine_bp_to_atom = engine_bp_to_atom,
+	.clock_source_id_to_atom = clock_source_id_to_atom,
+	.clock_source_id_to_atom_phy_clk_src_id =
+			clock_source_id_to_atom_phy_clk_src_id,
+	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+	.hpd_sel_to_atom = hpd_sel_to_atom,
+	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+	.phy_id_to_atom = phy_id_to_atom,
+	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+	.assign_control_parameter = NULL,
+	.clock_source_id_to_ref_clk_src = NULL,
+	.transmitter_bp_to_atom = NULL,
+	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Get the DCE11 command table helper function table
+ *
+ * @return
+ * Pointer to the static, read-only DCE11 helper table
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
+{
+	return &command_table_helper_funcs;
+}

+ 34 - 0
drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+
+struct command_table_helper;
+
+/* Get the DCE11 command table helper function table (static, const). */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */

+ 418 - 0
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c

@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper2.h"
+
+/* Transmitter -> ATOM_PHY_ID_*; unknown transmitters fall back to UNIPHYA. */
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+/* Signal type -> ATOM_TRANSMITTER_DIGMODE_V6_*; unrecognised signals
+ * default to DVI mode.  Unlike the DCE11 (V5) variant, there is no
+ * LVDS mode here.
+ */
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+/* Clock source -> transmitter-control PLL select.  Reuses the V5
+ * ATOM_TRANSMITTER_CONFIG_V5_* constants with the same >> 2 field
+ * adjustment as DCE11 — TODO(review): confirm this is intended for
+ * the V6 tables rather than a copy-paste.
+ */
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+/* HPD source -> ATOM_TRANSMITTER_V6_HPDx_SEL.  The V6 constants are
+ * returned unshifted (contrast with the V5 path, which shifts by 4);
+ * unknown sources select 0 (HPD disabled).
+ */
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel;
+}
+
+/* NOTE(review): as in the DCE11 variant, the switch computes
+ * atom_dig_encoder_sel but the function unconditionally returns 0,
+ * leaving the selection dead — confirm intent before changing.
+ */
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+	uint8_t atom_dig_encoder_sel = 0;
+
+	switch (id) {
+	case ENGINE_ID_DIGA:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+		break;
+	case ENGINE_ID_DIGB:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+		break;
+	case ENGINE_ID_DIGC:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+		break;
+	case ENGINE_ID_DIGD:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+		break;
+	case ENGINE_ID_DIGE:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+		break;
+	case ENGINE_ID_DIGF:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+		break;
+	case ENGINE_ID_DIGG:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+		break;
+	case ENGINE_ID_UNKNOWN:
+		/* No DIG_FRONT is associated to DIG_BACKEND */
+		atom_dig_encoder_sel = 0;
+		break;
+	default:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+		break;
+	}
+
+	return 0;
+}
+
+/* Clock source -> DCE112 combo-PHY / DTO PLL id, written through
+ * atom_pll_id.  Returns false for CLOCK_SOURCE_ID_UNDEFINED or
+ * unrecognised ids; note it also returns true when atom_pll_id is
+ * NULL (nothing written).
+ */
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_COMBO_PHY_PLL0:
+			*atom_pll_id = ATOM_COMBOPHY_PLL0;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL1:
+			*atom_pll_id = ATOM_COMBOPHY_PLL1;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL2:
+			*atom_pll_id = ATOM_COMBOPHY_PLL2;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL3:
+			*atom_pll_id = ATOM_COMBOPHY_PLL3;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL4:
+			*atom_pll_id = ATOM_COMBOPHY_PLL4;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL5:
+			*atom_pll_id = ATOM_COMBOPHY_PLL5;
+			break;
+		case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_GCK_DFS;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			*atom_pll_id = ATOM_DP_DTO;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			*atom_pll_id = ATOM_DP_DTO;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			/* Should not happen */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+/* DIG/DAC engine -> ASIC_INT_*_ENCODER_ID, written through
+ * atom_engine_id.  Returns false for unsupported engines or a NULL
+ * out-pointer.
+ */
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+/* Encoder-control action -> ATOM encoder command; traps the debugger
+ * (and returns 0) for unhandled actions.  Setup uses the DCE112
+ * STREAM_SETUP command rather than the plain SETUP used on DCE11.
+ */
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+		break;
+	}
+
+	return atom_action;
+}
+
+/* Pipe power-gating action -> ATOM_ENABLE/DISABLE/INIT; asserts (and
+ * returns 0) for unhandled actions.
+ */
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
+/* DC clock type -> DCE_CLOCK_TYPE_*, written through atom_clock_type.
+ * NOTE(review): retCode is never set to false — the function returns
+ * true even for an unhandled id (after asserting) and even when
+ * atom_clock_type is NULL with nothing written; confirm callers
+ * tolerate this before relying on the return value.
+ */
+static bool dc_clock_type_to_atom(
+		enum bp_dce_clock_type id,
+		uint32_t *atom_clock_type)
+{
+	bool retCode = true;
+
+	if (atom_clock_type != NULL) {
+		switch (id) {
+		case DCECLOCK_TYPE_DISPLAY_CLOCK:
+			*atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+			break;
+
+		case DCECLOCK_TYPE_DPREFCLK:
+			*atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+			break;
+
+		default:
+			ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+			break;
+		}
+	}
+
+	return retCode;
+}
+
+/* Transmitter color depth -> pixel-clock deep-color ratio (V7).
+ * 24bpp disables deep color; asserts (and returns 0) for unhandled
+ * depths.
+ */
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+	uint8_t atomColorDepth = 0;
+
+	switch (id) {
+	case TRANSMITTER_COLOR_DEPTH_24:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_30:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_36:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_48:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+		break;
+	}
+
+	return atomColorDepth;
+}
+
+/* function table: DCE112 bindings; hooks this generation does not
+ * need are left NULL, shared translators come from the common helper2.
+ */
+static const struct command_table_helper command_table_helper_funcs = {
+	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2,
+	.encoder_action_to_atom = encoder_action_to_atom,
+	.engine_bp_to_atom = engine_bp_to_atom,
+	.clock_source_id_to_atom = clock_source_id_to_atom,
+	.clock_source_id_to_atom_phy_clk_src_id =
+			clock_source_id_to_atom_phy_clk_src_id,
+	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+	.hpd_sel_to_atom = hpd_sel_to_atom,
+	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+	.phy_id_to_atom = phy_id_to_atom,
+	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+	.clock_source_id_to_ref_clk_src = NULL,
+	.transmitter_bp_to_atom = NULL,
+	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2,
+	.encoder_mode_bp_to_atom =
+			dal_cmd_table_helper_encoder_mode_bp_to_atom2,
+	.dc_clock_type_to_atom = dc_clock_type_to_atom,
+	.transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce112_get_table2
+ *
+ * @brief
+ * Get the DCE112 command table helper function table
+ *
+ * @return
+ * Pointer to the static, read-only DCE112 helper table
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void)
+{
+	return &command_table_helper_funcs;
+}

+ 34 - 0
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+
+struct command_table_helper;
+
+/* Get the DCE112 command table helper function table (static, const). */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER2_DCE112_H__ */

+ 418 - 0
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c

@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel;
+}
+
/*
 * Map a DIG engine id to the ATOM_TRANMSITTER_V6 DIG-select bits.
 *
 * NOTE(review): the switch fully computes atom_dig_encoder_sel, but the
 * function then returns the constant 0, discarding the mapping.  Either
 * the switch is dead code or the final statement should be
 * "return atom_dig_encoder_sel;".  This may be deliberate for DCE11.2
 * transmitter control -- confirm against the V6 table usage before
 * changing it.
 */
static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	uint8_t atom_dig_encoder_sel = 0;

	switch (id) {
	case ENGINE_ID_DIGA:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
		break;
	case ENGINE_ID_DIGB:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
		break;
	case ENGINE_ID_DIGC:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
		break;
	case ENGINE_ID_DIGD:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
		break;
	case ENGINE_ID_DIGE:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
		break;
	case ENGINE_ID_DIGF:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
		break;
	case ENGINE_ID_DIGG:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
		break;
	case ENGINE_ID_UNKNOWN:
		/* No DIG_FRONT is associated to DIG_BACKEND */
		atom_dig_encoder_sel = 0;
		break;
	default:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
		break;
	}

	return 0;
}
+
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_COMBO_PHY_PLL0:
+			*atom_pll_id = ATOM_COMBOPHY_PLL0;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL1:
+			*atom_pll_id = ATOM_COMBOPHY_PLL1;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL2:
+			*atom_pll_id = ATOM_COMBOPHY_PLL2;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL3:
+			*atom_pll_id = ATOM_COMBOPHY_PLL3;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL4:
+			*atom_pll_id = ATOM_COMBOPHY_PLL4;
+			break;
+		case CLOCK_SOURCE_COMBO_PHY_PLL5:
+			*atom_pll_id = ATOM_COMBOPHY_PLL5;
+			break;
+		case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_GCK_DFS;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			*atom_pll_id = ATOM_DP_DTO;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			*atom_pll_id = ATOM_DP_DTO;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			/* Should not happen */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+		break;
+	}
+
+	return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
/*
 * Translate a DAL DCE clock type into the ATOM DCE_CLOCK_TYPE_* value.
 *
 * NOTE(review): retCode is never set to false, so the function reports
 * success even for an unhandled id (after the ASSERT fires) and leaves
 * *atom_clock_type unwritten in that case.  Callers cannot use the
 * return value to detect bad input -- confirm whether the default case
 * should set retCode = false.
 */
static bool dc_clock_type_to_atom(
		enum bp_dce_clock_type id,
		uint32_t *atom_clock_type)
{
	bool retCode = true;

	if (atom_clock_type != NULL) {
		switch (id) {
		case DCECLOCK_TYPE_DISPLAY_CLOCK:
			*atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
			break;

		case DCECLOCK_TYPE_DPREFCLK:
			*atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
			break;

		default:
			ASSERT_CRITICAL(false); /* Unhandle action in driver! */
			break;
		}
	}

	return retCode;
}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+	uint8_t atomColorDepth = 0;
+
+	switch (id) {
+	case TRANSMITTER_COLOR_DEPTH_24:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_30:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_36:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+		break;
+	case TRANSMITTER_COLOR_DEPTH_48:
+		atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+		break;
+	default:
+		ASSERT_CRITICAL(false); /* Unhandle action in driver! */
+		break;
+	}
+
+	return atomColorDepth;
+}
+
/* DCE11.2 translation callbacks handed to the shared command-table code */
static const struct command_table_helper command_table_helper_funcs = {
	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
	.encoder_action_to_atom = encoder_action_to_atom,
	.engine_bp_to_atom = engine_bp_to_atom,
	.clock_source_id_to_atom = clock_source_id_to_atom,
	.clock_source_id_to_atom_phy_clk_src_id =
			clock_source_id_to_atom_phy_clk_src_id,
	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
	.hpd_sel_to_atom = hpd_sel_to_atom,
	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
	.phy_id_to_atom = phy_id_to_atom,
	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
	/* NULL entries: no DCE11.2-specific translation is provided here;
	 * presumably callers skip them -- confirm at call sites. */
	.assign_control_parameter = NULL,
	.clock_source_id_to_ref_clk_src = NULL,
	.transmitter_bp_to_atom = NULL,
	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
	.encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
	.dc_clock_type_to_atom = dc_clock_type_to_atom,
	.transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
};
+
/*
 * dal_cmd_tbl_helper_dce112_get_table
 *
 * @brief
 * Get the DCE11.2 command table helper function table
 *
 * @return pointer to the shared, read-only helper vtable
 */
const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void)
{
	return &command_table_helper_funcs;
}

+ 34 - 0
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE112_H__ */

+ 354 - 0
drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c

@@ -0,0 +1,354 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/grph_object_id.h"
+#include "include/grph_object_defs.h"
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+	uint8_t atom_action = 0;
+
+	switch (action) {
+	case ENCODER_CONTROL_ENABLE:
+		atom_action = ATOM_ENABLE;
+		break;
+	case ENCODER_CONTROL_DISABLE:
+		atom_action = ATOM_DISABLE;
+		break;
+	case ENCODER_CONTROL_SETUP:
+		atom_action = ATOM_ENCODER_CMD_SETUP;
+		break;
+	case ENCODER_CONTROL_INIT:
+		atom_action = ATOM_ENCODER_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */
+		break;
+	}
+
+	return atom_action;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+	bool result = false;
+
+	if (atom_engine_id != NULL)
+		switch (id) {
+		case ENGINE_ID_DIGA:
+			*atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			*atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			*atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			*atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGE:
+			*atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGF:
+			*atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DIGG:
+			*atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+			result = true;
+			break;
+		case ENGINE_ID_DACA:
+			*atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+	return result;
+}
+
+static bool clock_source_id_to_atom(
+	enum clock_source_id id,
+	uint32_t *atom_pll_id)
+{
+	bool result = true;
+
+	if (atom_pll_id != NULL)
+		switch (id) {
+		case CLOCK_SOURCE_ID_PLL0:
+			*atom_pll_id = ATOM_PPLL0;
+			break;
+		case CLOCK_SOURCE_ID_PLL1:
+			*atom_pll_id = ATOM_PPLL1;
+			break;
+		case CLOCK_SOURCE_ID_PLL2:
+			*atom_pll_id = ATOM_PPLL2;
+			break;
+		case CLOCK_SOURCE_ID_EXTERNAL:
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DFS:
+			*atom_pll_id = ATOM_EXT_PLL1;
+			break;
+		case CLOCK_SOURCE_ID_VCE:
+			/* for VCE encoding,
+			 * we need to pass in ATOM_PPLL_INVALID
+			 */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_DP_DTO:
+			/* When programming DP DTO PLL ID should be invalid */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			break;
+		case CLOCK_SOURCE_ID_UNDEFINED:
+			BREAK_TO_DEBUGGER(); /* check when this will happen! */
+			*atom_pll_id = ATOM_PPLL_INVALID;
+			result = false;
+			break;
+		default:
+			result = false;
+			break;
+		}
+
+	return result;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+		enum clock_source_id id)
+{
+	uint8_t atom_phy_clk_src_id = 0;
+
+	switch (id) {
+	case CLOCK_SOURCE_ID_PLL0:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL1:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	case CLOCK_SOURCE_ID_PLL2:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+		break;
+	case CLOCK_SOURCE_ID_EXTERNAL:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+		break;
+	default:
+		atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+		break;
+	}
+
+	return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+	uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+	switch (s) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+		break;
+	case SIGNAL_TYPE_LVDS:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+		break;
+	default:
+		atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+		break;
+	}
+
+	return atom_dig_mode;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+	uint8_t atom_hpd_sel = 0;
+
+	switch (id) {
+	case HPD_SOURCEID1:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+		break;
+	case HPD_SOURCEID2:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+		break;
+	case HPD_SOURCEID3:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+		break;
+	case HPD_SOURCEID4:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+		break;
+	case HPD_SOURCEID5:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+		break;
+	case HPD_SOURCEID6:
+		atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+		break;
+	case HPD_SOURCEID_UNKNOWN:
+	default:
+		atom_hpd_sel = 0;
+		break;
+	}
+	return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+	uint8_t atom_dig_encoder_sel = 0;
+
+	switch (id) {
+	case ENGINE_ID_DIGA:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	case ENGINE_ID_DIGB:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+		break;
+	case ENGINE_ID_DIGC:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+		break;
+	case ENGINE_ID_DIGD:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+		break;
+	case ENGINE_ID_DIGE:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+		break;
+	case ENGINE_ID_DIGF:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+		break;
+	case ENGINE_ID_DIGG:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+		break;
+	default:
+		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+		break;
+	}
+
+	return atom_dig_encoder_sel;
+}
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+	uint8_t atom_phy_id;
+
+	switch (t) {
+	case TRANSMITTER_UNIPHY_A:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	case TRANSMITTER_UNIPHY_B:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+		break;
+	case TRANSMITTER_UNIPHY_C:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+		break;
+	case TRANSMITTER_UNIPHY_D:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+		break;
+	case TRANSMITTER_UNIPHY_E:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+		break;
+	case TRANSMITTER_UNIPHY_F:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+		break;
+	case TRANSMITTER_UNIPHY_G:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+		break;
+	default:
+		atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+		break;
+	}
+	return atom_phy_id;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+	enum bp_pipe_control_action action)
+{
+	uint8_t atom_pipe_action = 0;
+
+	switch (action) {
+	case ASIC_PIPE_DISABLE:
+		atom_pipe_action = ATOM_DISABLE;
+		break;
+	case ASIC_PIPE_ENABLE:
+		atom_pipe_action = ATOM_ENABLE;
+		break;
+	case ASIC_PIPE_INIT:
+		atom_pipe_action = ATOM_INIT;
+		break;
+	default:
+		BREAK_TO_DEBUGGER(); /* Unhandle action in driver! */
+		break;
+	}
+
+	return atom_pipe_action;
+}
+
/* DCE8.0 translation callbacks handed to the shared command-table code */
static const struct command_table_helper command_table_helper_funcs = {
	.controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
	.encoder_action_to_atom = encoder_action_to_atom,
	.engine_bp_to_atom = engine_bp_to_atom,
	.clock_source_id_to_atom = clock_source_id_to_atom,
	.clock_source_id_to_atom_phy_clk_src_id =
		clock_source_id_to_atom_phy_clk_src_id,
	.signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
	.hpd_sel_to_atom = hpd_sel_to_atom,
	.dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
	.phy_id_to_atom = phy_id_to_atom,
	.disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
	/* generic (non-ASIC-specific) helpers from ../command_table_helper.c */
	.assign_control_parameter =
		dal_cmd_table_helper_assign_control_parameter,
	.clock_source_id_to_ref_clk_src =
		dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
	.transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
	.encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
	.encoder_mode_bp_to_atom =
		dal_cmd_table_helper_encoder_mode_bp_to_atom,
};
+
/* Return the shared, read-only DCE8.0 command table helper vtable. */
const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
{
	return &command_table_helper_funcs;
}

+ 33 - 0
drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+
+struct command_table_helper;
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void);
+
+#endif

+ 18 - 0
drivers/gpu/drm/amd/display/dc/calcs/Makefile

@@ -0,0 +1,18 @@
#
# Makefile for the 'calcs' sub-component of DAL.
# It calculates Bandwidth and Watermarks values for HW programming
#

# The DCN calc sources contain floating-point math, so they are compiled
# with hardware FP / SSE enabled (per-object CFLAGS).
CFLAGS_dcn_calcs.o := -mhard-float -msse -mpreferred-stack-boundary=4
CFLAGS_dcn_calc_auto.o := -mhard-float -msse -mpreferred-stack-boundary=4
CFLAGS_dcn_calc_math.o := -mhard-float -msse -mpreferred-stack-boundary=4 -Wno-tautological-compare

# Bandwidth/watermark code built for all supported ASICs.
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o

# DCN 1.0 adds the float-based calc sources guarded above.
ifdef CONFIG_DRM_AMD_DC_DCN1_0
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif

AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))

AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)

+ 191 - 0
drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c

@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "bw_fixed.h"
+
+
/*
 * 64-bit integer extrema.
 *
 * The previous MIN_I64 was (int64_t)(-(1LL << 63)): left-shifting 1LL
 * into the sign bit is undefined behavior in C (C11 6.5.7).  Derive the
 * minimum from the maximum instead; both remain constant expressions of
 * type int64_t with the same values as before.
 */
#define MAX_I64 \
	(int64_t)((1ULL << 63) - 1)

#define MIN_I64 \
	(-MAX_I64 - 1)

/* mask of the BW_FIXED fractional bits */
#define FRACTIONAL_PART_MASK \
	((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)

#define GET_FRACTIONAL_PART(x) \
	(FRACTIONAL_PART_MASK & (x))
+
/*
 * abs_i64() - absolute value of a 64-bit signed integer, as unsigned.
 *
 * Negation is performed in the unsigned domain so that INT64_MIN, whose
 * magnitude is not representable in int64_t, is handled without
 * signed-overflow undefined behavior (the previous "-arg" was UB for
 * that input).
 */
static uint64_t abs_i64(int64_t arg)
{
	if (arg >= 0)
		return (uint64_t)arg;

	return 0 - (uint64_t)arg;
}
+
/*
 * Convert a plain integer into bw_fixed fixed-point representation.
 * @value must fit in the integer part; this is only checked via ASSERT.
 *
 * NOTE(review): left-shifting a negative value is undefined behavior by
 * the C standard; this relies on two's-complement compiler behavior.
 */
struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
{
	struct bw_fixed res;
	ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
	res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
	return res;
}
+
/*
 * bw_frc_to_fixed() - build a bw_fixed from a numerator/denominator pair.
 *
 * Sign-magnitude long division: the integer part comes from
 * div64_u64_rem(), then one quotient bit is produced per fractional bit,
 * and the last bit is rounded to nearest.  @denominator must be
 * non-zero (only ASSERTed).
 */
struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
{
	struct bw_fixed res;
	bool arg1_negative = numerator < 0;
	bool arg2_negative = denominator < 0;
	uint64_t arg1_value;
	uint64_t arg2_value;
	uint64_t remainder;

	/* determine integer part */
	uint64_t res_value;

	ASSERT(denominator != 0);

	arg1_value = abs_i64(numerator);
	arg2_value = abs_i64(denominator);
	res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);

	ASSERT(res_value <= BW_FIXED_MAX_I32);

	/* determine fractional part */
	{
		uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;

		do
		{
			/* shift one more quotient bit out of the remainder */
			remainder <<= 1;

			res_value <<= 1;

			if (remainder >= arg2_value)
			{
				res_value |= 1;
				remainder -= arg2_value;
			}
		} while (--i != 0);
	}

	/* round up LSB */
	{
		/* round to nearest: +1 when the leftover remainder is at
		 * least half the divisor */
		uint64_t summand = (remainder << 1) >= arg2_value;

		ASSERT(res_value <= MAX_I64 - summand);

		res_value += summand;
	}

	res.value = (int64_t)(res_value);

	/* result is negative iff exactly one argument was negative */
	if (arg1_negative ^ arg2_negative)
		res.value = -res.value;
	return res;
}
+
/*
 * bw_floor2() - round @arg down in magnitude to a multiple of
 * @significance.  @significance must be non-zero.
 *
 * NOTE(review): div64_s64() truncates toward zero, so a negative @arg
 * is rounded toward zero (i.e. up), not toward negative infinity; the
 * ASSERT only checks that the magnitude did not grow.  Confirm callers
 * only pass non-negative values if true floor semantics are expected.
 */
struct bw_fixed bw_floor2(
	const struct bw_fixed arg,
	const struct bw_fixed significance)
{
	struct bw_fixed result;
	int64_t multiplicand;

	multiplicand = div64_s64(arg.value, abs_i64(significance.value));
	result.value = abs_i64(significance.value) * multiplicand;
	ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
	return result;
}
+
/*
 * bw_ceil2() - round @arg away from zero to a multiple of @significance
 * (magnitude ceiling; for a negative @arg this rounds toward negative
 * infinity).  @significance must be non-zero.
 */
struct bw_fixed bw_ceil2(
	const struct bw_fixed arg,
	const struct bw_fixed significance)
{
	struct bw_fixed result;
	int64_t multiplicand;

	multiplicand = div64_s64(arg.value, abs_i64(significance.value));
	result.value = abs_i64(significance.value) * multiplicand;
	/* truncation moved us toward zero; step one unit away from zero */
	if (abs_i64(result.value) < abs_i64(arg.value)) {
		if (arg.value < 0)
			result.value -= abs_i64(significance.value);
		else
			result.value += abs_i64(significance.value);
	}
	return result;
}
+
/*
 * bw_mul() - sign-magnitude fixed-point multiply.
 *
 * Splits each operand into integer and fractional halves and sums the
 * four schoolbook partial products; the fractional*fractional term is
 * rounded to nearest at the fixed-point LSB.  Overflow is only caught
 * by ASSERTs.
 */
struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
{
	struct bw_fixed res;

	bool arg1_negative = arg1.value < 0;
	bool arg2_negative = arg2.value < 0;

	uint64_t arg1_value = abs_i64(arg1.value);
	uint64_t arg2_value = abs_i64(arg2.value);

	uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value);
	uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value);

	uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
	uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);

	uint64_t tmp;

	/* int * int partial product */
	res.value = arg1_int * arg2_int;

	ASSERT(res.value <= BW_FIXED_MAX_I32);

	res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART;

	/* int * frac cross terms */
	tmp = arg1_int * arg2_fra;

	ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));

	res.value += tmp;

	tmp = arg2_int * arg1_fra;

	ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));

	res.value += tmp;

	/* frac * frac, rescaled and rounded to nearest (>= 0.5 rounds up) */
	tmp = arg1_fra * arg2_fra;

	tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) +
		(tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));

	ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));

	res.value += tmp;

	/* result is negative iff exactly one operand was negative */
	if (arg1_negative ^ arg2_negative)
		res.value = -res.value;
	return res;
}
+

+ 197 - 0
drivers/gpu/drm/amd/display/dc/calcs/custom_float.c

@@ -0,0 +1,197 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "custom_float.h"
+
+
/*
 * Decompose a fixed31_32 value into sign/exponent/mantissa fields for
 * the custom float layout described by *format.
 *
 * @value     number to convert
 * @format    target bit layout (sign flag, exponenta_bits, mantissa_bits)
 * @negative  [out] sign bit (only set when format->sign is enabled)
 * @mantissa  [out] fractional bits, format->mantissa_bits wide
 * @exponenta [out] biased exponent
 *
 * Always returns true.
 */
static bool build_custom_float(
	struct fixed31_32 value,
	const struct custom_float_format *format,
	bool *negative,
	uint32_t *mantissa,
	uint32_t *exponenta)
{
	/* exponent bias: 2^(exponent_bits - 1) - 1, IEEE-754 style */
	uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;

	/* largest normalized significand: (2^(m+1) - 1) / 2^m, just below 2.0 */
	const struct fixed31_32 mantissa_constant_plus_max_fraction =
		dal_fixed31_32_from_fraction(
			(1LL << (format->mantissa_bits + 1)) - 1,
			1LL << format->mantissa_bits);

	struct fixed31_32 mantiss;

	/* zero encodes as all-zero fields */
	if (dal_fixed31_32_eq(
		value,
		dal_fixed31_32_zero)) {
		*negative = false;
		*mantissa = 0;
		*exponenta = 0;
		return true;
	}

	if (dal_fixed31_32_lt(
		value,
		dal_fixed31_32_zero)) {
		*negative = format->sign;
		value = dal_fixed31_32_neg(value);
	} else {
		*negative = false;
	}

	/* normalize: shift left while below 1.0 ... */
	if (dal_fixed31_32_lt(
		value,
		dal_fixed31_32_one)) {
		uint32_t i = 1;

		do {
			value = dal_fixed31_32_shl(value, 1);
			++i;
		} while (dal_fixed31_32_lt(
			value,
			dal_fixed31_32_one));

		--i;

		/* exponent underflow: flush to zero */
		if (exp_offset <= i) {
			*mantissa = 0;
			*exponenta = 0;
			return true;
		}

		*exponenta = exp_offset - i;
	} else if (dal_fixed31_32_le(
		mantissa_constant_plus_max_fraction,
		value)) {
		/* ... or shift right while at/above the max significand */
		uint32_t i = 1;

		do {
			value = dal_fixed31_32_shr(value, 1);
			++i;
		} while (dal_fixed31_32_lt(
			mantissa_constant_plus_max_fraction,
			value));

		*exponenta = exp_offset + i - 1;
	} else {
		*exponenta = exp_offset;
	}

	/* fractional part of the normalized value becomes the mantissa;
	 * out-of-range intermediates are clamped to zero */
	mantiss = dal_fixed31_32_sub(
		value,
		dal_fixed31_32_one);

	if (dal_fixed31_32_lt(
			mantiss,
			dal_fixed31_32_zero) ||
		dal_fixed31_32_lt(
			dal_fixed31_32_one,
			mantiss))
		mantiss = dal_fixed31_32_zero;
	else
		mantiss = dal_fixed31_32_shl(
			mantiss,
			format->mantissa_bits);

	*mantissa = dal_fixed31_32_floor(mantiss);

	return true;
}
+
/*
 * Pack already-decomposed sign/mantissa/exponent fields into one
 * register value: mantissa in the low bits, exponent directly above,
 * optional sign bit on top.  Always returns true.
 *
 * NOTE(review): the sanity masks below are (bits + 1) wide -- one bit
 * wider than the actual fields -- so an input exactly one bit too large
 * escapes the clamp.  Confirm whether the "+ 1" is intentional.
 */
static bool setup_custom_float(
	const struct custom_float_format *format,
	bool negative,
	uint32_t mantissa,
	uint32_t exponenta,
	uint32_t *result)
{
	uint32_t i = 0;
	uint32_t j = 0;

	uint32_t value = 0;

	/* verification code:
	 * once calculation is ok we can remove it
	 */

	const uint32_t mantissa_mask =
		(1 << (format->mantissa_bits + 1)) - 1;

	const uint32_t exponenta_mask =
		(1 << (format->exponenta_bits + 1)) - 1;

	if (mantissa & ~mantissa_mask) {
		BREAK_TO_DEBUGGER();
		mantissa = mantissa_mask;
	}

	if (exponenta & ~exponenta_mask) {
		BREAK_TO_DEBUGGER();
		exponenta = exponenta_mask;
	}

	/* end of verification code */

	/* copy mantissa bits into the low bits of the result */
	while (i < format->mantissa_bits) {
		uint32_t mask = 1 << i;

		if (mantissa & mask)
			value |= mask;

		++i;
	}

	/* exponent bits go directly above the mantissa */
	while (j < format->exponenta_bits) {
		uint32_t mask = 1 << j;

		if (exponenta & mask)
			value |= mask << i;

		++j;
	}

	/* optional sign bit on top */
	if (negative && format->sign)
		value |= 1 << (i + j);

	*result = value;

	return true;
}
+
+bool convert_to_custom_float_format(
+	struct fixed31_32 value,
+	const struct custom_float_format *format,
+	uint32_t *result)
+{
+	uint32_t mantissa;
+	uint32_t exponenta;
+	bool negative;
+
+	return build_custom_float(
+		value, format, &negative, &mantissa, &exponenta) &&
+	setup_custom_float(
+		format, negative, mantissa, exponenta, result);
+}
+
+

+ 3257 - 0
drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c

@@ -0,0 +1,3257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+/*******************************************************************************
+ * Private Functions
+ ******************************************************************************/
+
+static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id)
+{
+	/* Pick the bandwidth-calculation formula set for this ASIC family,
+	 * refined by hardware revision where one family spans several parts.
+	 */
+	switch (asic_id.chip_family) {
+
+	case FAMILY_CZ:
+		/* Stoney is a CZ-family derivative with its own coefficients */
+		return ASIC_REV_IS_STONEY(asic_id.hw_internal_rev) ?
+			BW_CALCS_VERSION_STONEY : BW_CALCS_VERSION_CARRIZO;
+
+	case FAMILY_VI:
+		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
+			return BW_CALCS_VERSION_POLARIS10;
+		/* Polaris12 shares the Polaris11 bandwidth model */
+		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+			return BW_CALCS_VERSION_POLARIS11;
+		/* other VI revisions are not supported by these calcs */
+		return BW_CALCS_VERSION_INVALID;
+
+	case FAMILY_AI:
+		return BW_CALCS_VERSION_VEGA10;
+
+	default:
+		return BW_CALCS_VERSION_INVALID;
+	}
+}
+
+static void calculate_bandwidth(
+	const struct bw_calcs_dceip *dceip,
+	const struct bw_calcs_vbios *vbios,
+	struct bw_calcs_data *data)
+
+{
+	const int32_t pixels_per_chunk = 512;
+	const int32_t high = 2;
+	const int32_t mid = 1;
+	const int32_t low = 0;
+	const uint32_t s_low = 0;
+	const uint32_t s_mid1 = 1;
+	const uint32_t s_mid2 = 2;
+	const uint32_t s_mid3 = 3;
+	const uint32_t s_mid4 = 4;
+	const uint32_t s_mid5 = 5;
+	const uint32_t s_mid6 = 6;
+	const uint32_t s_high = 7;
+	const uint32_t bus_efficiency = 1;
+	const uint32_t dmif_chunk_buff_margin = 1;
+
+	uint32_t max_chunks_fbc_mode;
+	int32_t num_cursor_lines;
+
+	int32_t i, j, k;
+	struct bw_fixed yclk[3];
+	struct bw_fixed sclk[8];
+	bool d0_underlay_enable;
+	bool d1_underlay_enable;
+	bool fbc_enabled;
+	bool lpt_enabled;
+	enum bw_defines sclk_message;
+	enum bw_defines yclk_message;
+	enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
+	enum bw_defines tiling_mode[maximum_number_of_surfaces];
+	enum bw_defines surface_type[maximum_number_of_surfaces];
+	enum bw_defines voltage;
+	enum bw_defines pipe_check;
+	enum bw_defines hsr_check;
+	enum bw_defines vsr_check;
+	enum bw_defines lb_size_check;
+	enum bw_defines fbc_check;
+	enum bw_defines rotation_check;
+	enum bw_defines mode_check;
+	enum bw_defines nbp_state_change_enable_blank;
+	/*initialize variables*/
+	int32_t number_of_displays_enabled = 0;
+	int32_t number_of_displays_enabled_with_margin = 0;
+	int32_t number_of_aligned_displays_with_no_margin = 0;
+
+	yclk[low] = vbios->low_yclk;
+	yclk[mid] = vbios->mid_yclk;
+	yclk[high] = vbios->high_yclk;
+	sclk[s_low] = vbios->low_sclk;
+	sclk[s_mid1] = vbios->mid1_sclk;
+	sclk[s_mid2] = vbios->mid2_sclk;
+	sclk[s_mid3] = vbios->mid3_sclk;
+	sclk[s_mid4] = vbios->mid4_sclk;
+	sclk[s_mid5] = vbios->mid5_sclk;
+	sclk[s_mid6] = vbios->mid6_sclk;
+	sclk[s_high] = vbios->high_sclk;
+	/*''''''''''''''''''*/
+	/* surface assignment:*/
+	/* 0: d0 underlay or underlay luma*/
+	/* 1: d0 underlay chroma*/
+	/* 2: d1 underlay or underlay luma*/
+	/* 3: d1 underlay chroma*/
+	/* 4: d0 graphics*/
+	/* 5: d1 graphics*/
+	/* 6: d2 graphics*/
+	/* 7: d3 graphics, same mode as d2*/
+	/* 8: d4 graphics, same mode as d2*/
+	/* 9: d5 graphics, same mode as d2*/
+	/* ...*/
+	/* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
+	/* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
+	/* underlay luma and chroma surface parameters from spreadsheet*/
+
+
+
+
+	if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
+	else {
+		d0_underlay_enable = 1;
+	}
+	if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
+	else {
+		d1_underlay_enable = 1;
+	}
+	data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
+	switch (data->underlay_surface_type) {
+	case bw_def_420:
+		surface_type[0] = bw_def_underlay420_luma;
+		surface_type[2] = bw_def_underlay420_luma;
+		data->bytes_per_pixel[0] = 1;
+		data->bytes_per_pixel[2] = 1;
+		surface_type[1] = bw_def_underlay420_chroma;
+		surface_type[3] = bw_def_underlay420_chroma;
+		data->bytes_per_pixel[1] = 2;
+		data->bytes_per_pixel[3] = 2;
+		data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
+		data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
+		data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
+		data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
+		break;
+	case bw_def_422:
+		surface_type[0] = bw_def_underlay422;
+		surface_type[2] = bw_def_underlay422;
+		data->bytes_per_pixel[0] = 2;
+		data->bytes_per_pixel[2] = 2;
+		data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
+		data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
+		break;
+	default:
+		surface_type[0] = bw_def_underlay444;
+		surface_type[2] = bw_def_underlay444;
+		data->bytes_per_pixel[0] = 4;
+		data->bytes_per_pixel[2] = 4;
+		data->lb_size_per_component[0] = dceip->lb_size_per_component444;
+		data->lb_size_per_component[2] = dceip->lb_size_per_component444;
+		break;
+	}
+	if (d0_underlay_enable) {
+		switch (data->underlay_surface_type) {
+		case bw_def_420:
+			data->enable[0] = 1;
+			data->enable[1] = 1;
+			break;
+		default:
+			data->enable[0] = 1;
+			data->enable[1] = 0;
+			break;
+		}
+	}
+	else {
+		data->enable[0] = 0;
+		data->enable[1] = 0;
+	}
+	if (d1_underlay_enable) {
+		switch (data->underlay_surface_type) {
+		case bw_def_420:
+			data->enable[2] = 1;
+			data->enable[3] = 1;
+			break;
+		default:
+			data->enable[2] = 1;
+			data->enable[3] = 0;
+			break;
+		}
+	}
+	else {
+		data->enable[2] = 0;
+		data->enable[3] = 0;
+	}
+	data->use_alpha[0] = 0;
+	data->use_alpha[1] = 0;
+	data->use_alpha[2] = 0;
+	data->use_alpha[3] = 0;
+	data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
+	data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
+	data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
+	data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
+	/*underlay0 same and graphics display pipe0*/
+	data->interlace_mode[0] = data->interlace_mode[4];
+	data->interlace_mode[1] = data->interlace_mode[4];
+	/*underlay1 same and graphics display pipe1*/
+	data->interlace_mode[2] = data->interlace_mode[5];
+	data->interlace_mode[3] = data->interlace_mode[5];
+	/*underlay0 same and graphics display pipe0*/
+	data->h_total[0] = data->h_total[4];
+	data->v_total[0] = data->v_total[4];
+	data->h_total[1] = data->h_total[4];
+	data->v_total[1] = data->v_total[4];
+	/*underlay1 same and graphics display pipe1*/
+	data->h_total[2] = data->h_total[5];
+	data->v_total[2] = data->v_total[5];
+	data->h_total[3] = data->h_total[5];
+	data->v_total[3] = data->v_total[5];
+	/*underlay0 same and graphics display pipe0*/
+	data->pixel_rate[0] = data->pixel_rate[4];
+	data->pixel_rate[1] = data->pixel_rate[4];
+	/*underlay1 same and graphics display pipe1*/
+	data->pixel_rate[2] = data->pixel_rate[5];
+	data->pixel_rate[3] = data->pixel_rate[5];
+	if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
+		tiling_mode[0] = bw_def_linear;
+		tiling_mode[1] = bw_def_linear;
+		tiling_mode[2] = bw_def_linear;
+		tiling_mode[3] = bw_def_linear;
+	}
+	else {
+		tiling_mode[0] = bw_def_landscape;
+		tiling_mode[1] = bw_def_landscape;
+		tiling_mode[2] = bw_def_landscape;
+		tiling_mode[3] = bw_def_landscape;
+	}
+	data->lb_bpc[0] = data->underlay_lb_bpc;
+	data->lb_bpc[1] = data->underlay_lb_bpc;
+	data->lb_bpc[2] = data->underlay_lb_bpc;
+	data->lb_bpc[3] = data->underlay_lb_bpc;
+	data->compression_rate[0] = bw_int_to_fixed(1);
+	data->compression_rate[1] = bw_int_to_fixed(1);
+	data->compression_rate[2] = bw_int_to_fixed(1);
+	data->compression_rate[3] = bw_int_to_fixed(1);
+	data->access_one_channel_only[0] = 0;
+	data->access_one_channel_only[1] = 0;
+	data->access_one_channel_only[2] = 0;
+	data->access_one_channel_only[3] = 0;
+	data->cursor_width_pixels[0] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[1] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[2] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[3] = bw_int_to_fixed(0);
+	/* graphics surface parameters from spreadsheet*/
+	fbc_enabled = 0;
+	lpt_enabled = 0;
+	for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
+		if (i < data->number_of_displays + 4) {
+			if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
+				data->enable[i] = 0;
+				data->use_alpha[i] = 0;
+			}
+			else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 1;
+			}
+			else if (i == 4) {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 0;
+			}
+			else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
+				data->enable[i] = 0;
+				data->use_alpha[i] = 0;
+			}
+			else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 1;
+			}
+			else {
+				data->enable[i] = 1;
+				data->use_alpha[i] = 0;
+			}
+		}
+		else {
+			data->enable[i] = 0;
+			data->use_alpha[i] = 0;
+		}
+		data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
+		surface_type[i] = bw_def_graphics;
+		data->lb_size_per_component[i] = dceip->lb_size_per_component444;
+		if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
+			tiling_mode[i] = bw_def_linear;
+		}
+		else {
+			tiling_mode[i] = bw_def_tiled;
+		}
+		data->lb_bpc[i] = data->graphics_lb_bpc;
+		if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
+			data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
+			data->access_one_channel_only[i] = data->lpt_en[i];
+		}
+		else {
+			data->compression_rate[i] = bw_int_to_fixed(1);
+			data->access_one_channel_only[i] = 0;
+		}
+		if (data->fbc_en[i] == 1) {
+			fbc_enabled = 1;
+			if (data->lpt_en[i] == 1) {
+				lpt_enabled = 1;
+			}
+		}
+		data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
+	}
+	/* display_write_back420*/
+	data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
+	data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->enable[maximum_number_of_surfaces - 2] = 1;
+		data->enable[maximum_number_of_surfaces - 1] = 1;
+	}
+	else {
+		data->enable[maximum_number_of_surfaces - 2] = 0;
+		data->enable[maximum_number_of_surfaces - 1] = 0;
+	}
+	surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
+	surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
+	data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
+	data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
+	data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
+	data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
+	data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
+	data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
+	data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+	data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+	tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
+	tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
+	data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
+	data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
+	data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
+	data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
+	/*assume display pipe1 has dwb enabled*/
+	data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
+	data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
+	data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
+	data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
+	data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
+	data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
+	data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
+	data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
+	data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
+	data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
+	data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
+	data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
+	data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+	data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+	data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
+	data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
+	data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+	data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+	data->use_alpha[maximum_number_of_surfaces - 2] = 0;
+	data->use_alpha[maximum_number_of_surfaces - 1] = 0;
+	/*mode check calculations:*/
+	/* mode within dce ip capabilities*/
+	/* fbc*/
+	/* hsr*/
+	/* vsr*/
+	/* lb size*/
+	/*effective scaling source and ratios:*/
+	/*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/
+	/*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/
+	/*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
+	/*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
+	/*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
+	/*in interlace mode there is 2:1 vertical downscaling for each field*/
+	/*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
+				data->h_taps[i] = bw_int_to_fixed(1);
+				data->v_taps[i] = bw_int_to_fixed(1);
+			}
+			if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
+				data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
+				data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
+				data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
+				data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
+				data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
+			}
+			else {
+				data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
+				data->src_width_after_surface_type = data->src_width[i];
+				data->src_height_after_surface_type = data->src_height[i];
+				data->hsr_after_surface_type = data->h_scale_ratio[i];
+				data->vsr_after_surface_type = data->v_scale_ratio[i];
+			}
+			if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->src_width_after_rotation = data->src_height_after_surface_type;
+				data->src_height_after_rotation = data->src_width_after_surface_type;
+				data->hsr_after_rotation = data->vsr_after_surface_type;
+				data->vsr_after_rotation = data->hsr_after_surface_type;
+			}
+			else {
+				data->src_width_after_rotation = data->src_width_after_surface_type;
+				data->src_height_after_rotation = data->src_height_after_surface_type;
+				data->hsr_after_rotation = data->hsr_after_surface_type;
+				data->vsr_after_rotation = data->vsr_after_surface_type;
+			}
+			switch (data->stereo_mode[i]) {
+			case bw_def_top_bottom:
+				data->source_width_pixels[i] = data->src_width_after_rotation;
+				data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
+				data->hsr_after_stereo = data->hsr_after_rotation;
+				data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
+				break;
+			case bw_def_side_by_side:
+				data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
+				data->source_height_pixels = data->src_height_after_rotation;
+				data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
+				data->vsr_after_stereo = data->vsr_after_rotation;
+				break;
+			default:
+				data->source_width_pixels[i] = data->src_width_after_rotation;
+				data->source_height_pixels = data->src_height_after_rotation;
+				data->hsr_after_stereo = data->hsr_after_rotation;
+				data->vsr_after_stereo = data->vsr_after_rotation;
+				break;
+			}
+			data->hsr[i] = data->hsr_after_stereo;
+			if (data->interlace_mode[i]) {
+				data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
+			}
+			else {
+				data->vsr[i] = data->vsr_after_stereo;
+			}
+			if (data->panning_and_bezel_adjustment != bw_def_none) {
+				data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
+			}
+			else {
+				data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
+			}
+			data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
+		}
+	}
+	/*mode support checks:*/
+	/*the number of graphics and underlay pipes is limited by the ip support*/
+	/*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
+	/*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
+	/*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
+	/*the number of lines in the line buffer has to exceed the number of vertical taps*/
+	/*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
+	/*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+	/*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+	/*frame buffer compression is not supported with stereo mode, rotation, or non- 888 formats*/
+	/*rotation is not supported with linear of stereo modes*/
+	if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
+		pipe_check = bw_def_ok;
+	}
+	else {
+		pipe_check = bw_def_notok;
+	}
+	hsr_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
+				if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
+					hsr_check = bw_def_hsr_mtn_4;
+				}
+				else {
+					if (bw_mtn(data->hsr[i], data->h_taps[i])) {
+						hsr_check = bw_def_hsr_mtn_h_taps;
+					}
+					else {
+						if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
+							hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
+						}
+					}
+				}
+			}
+		}
+	}
+	vsr_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
+				if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
+					vsr_check = bw_def_vsr_mtn_4;
+				}
+				else {
+					if (bw_mtn(data->vsr[i], data->v_taps[i])) {
+						vsr_check = bw_def_vsr_mtn_v_taps;
+					}
+				}
+			}
+		}
+	}
+	lb_size_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
+				data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
+			}
+			else {
+				data->source_width_in_lb = data->source_width_pixels[i];
+			}
+			switch (data->lb_bpc[i]) {
+			case 8:
+				data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+				break;
+			case 10:
+				data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+				break;
+			default:
+				data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
+				break;
+			}
+			data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
+			/*clamp the partitions to the maxium number supported by the lb*/
+			if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+				data->lb_partitions_max[i] = bw_int_to_fixed(10);
+			}
+			else {
+				data->lb_partitions_max[i] = bw_int_to_fixed(7);
+			}
+			data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
+			if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
+				lb_size_check = bw_def_notok;
+			}
+		}
+	}
+	fbc_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
+			fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
+		}
+	}
+	rotation_check = bw_def_ok;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
+				rotation_check = bw_def_invalid_linear_or_stereo_mode;
+			}
+		}
+	}
+	if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
+		mode_check = bw_def_ok;
+	}
+	else {
+		mode_check = bw_def_notok;
+	}
+	/*number of memory channels for write-back client*/
+	data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
+	data->number_of_dram_channels = vbios->number_of_dram_channels;
+	/*modify number of memory channels if lpt mode is enabled*/
+	/* low power tiling mode register*/
+	/* 0 = use channel 0*/
+	/* 1 = use channel 0 and 1*/
+	/* 2 = use channel 0,1,2,3*/
+	if ((fbc_enabled == 1 && lpt_enabled == 1)) {
+		data->dram_efficiency = bw_int_to_fixed(1);
+		if (dceip->low_power_tiling_mode == 0) {
+			data->number_of_dram_channels = 1;
+		}
+		else if (dceip->low_power_tiling_mode == 1) {
+			data->number_of_dram_channels = 2;
+		}
+		else if (dceip->low_power_tiling_mode == 2) {
+			data->number_of_dram_channels = 4;
+		}
+		else {
+			data->number_of_dram_channels = 1;
+		}
+	}
+	else {
+		data->dram_efficiency = bw_frc_to_fixed(8, 10);
+	}
+	/*memory request size and latency hiding:*/
+	/*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
+	/*the display write-back requests are single line*/
+	/*for tiled graphics surfaces, or undelay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half is useful of the bytes in the request size in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
+	/*for undelay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
+				if ((i < 4)) {
+					/*underlay portrait tiling mode is not supported*/
+					data->orthogonal_rotation[i] = 1;
+				}
+				else {
+					/*graphics portrait tiling mode*/
+					if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
+						data->orthogonal_rotation[i] = 0;
+					}
+					else {
+						data->orthogonal_rotation[i] = 1;
+					}
+				}
+			}
+			else {
+				if ((i < 4)) {
+					/*underlay landscape tiling mode is only supported*/
+					if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
+						data->orthogonal_rotation[i] = 0;
+					}
+					else {
+						data->orthogonal_rotation[i] = 1;
+					}
+				}
+				else {
+					/*graphics landscape tiling mode*/
+					if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
+						data->orthogonal_rotation[i] = 0;
+					}
+					else {
+						data->orthogonal_rotation[i] = 1;
+					}
+				}
+			}
+			if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
+				data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
+			}
+			else {
+				data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
+			}
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->bytes_per_request[i] = bw_int_to_fixed(64);
+				data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+				data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
+				data->latency_hiding_lines[i] = bw_int_to_fixed(1);
+			}
+			else if (tiling_mode[i] == bw_def_linear) {
+				data->bytes_per_request[i] = bw_int_to_fixed(64);
+				data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+				data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+				data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+			}
+			else {
+				if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
+					switch (data->bytes_per_pixel[i]) {
+					case 8:
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+						if (data->orthogonal_rotation[i]) {
+							data->bytes_per_request[i] = bw_int_to_fixed(32);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+						}
+						else {
+							data->bytes_per_request[i] = bw_int_to_fixed(64);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+						}
+						break;
+					case 4:
+						if (data->orthogonal_rotation[i]) {
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+							data->bytes_per_request[i] = bw_int_to_fixed(32);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+						}
+						else {
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+							data->bytes_per_request[i] = bw_int_to_fixed(64);
+							data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+						}
+						break;
+					case 2:
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+						data->bytes_per_request[i] = bw_int_to_fixed(32);
+						data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+						break;
+					default:
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+						data->bytes_per_request[i] = bw_int_to_fixed(32);
+						data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+						break;
+					}
+				}
+				else {
+					data->bytes_per_request[i] = bw_int_to_fixed(64);
+					data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+					if (data->orthogonal_rotation[i]) {
+						data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+						data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+					}
+					else {
+						switch (data->bytes_per_pixel[i]) {
+						case 4:
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+							break;
+						case 2:
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+							break;
+						default:
+							data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+							data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+							break;
+						}
+					}
+				}
+			}
+		}
+	}
+	/*requested peak bandwidth:*/
+	/*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
+	/*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
+	/*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
+	/**/
+	/*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
+	/*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
+	/**/
+	/*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
+	/*rounded up to even and divided by the line times for initialization, which is normally three.*/
+	/*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
+	/*rounded up to line pairs if not doing line buffer prefetching.*/
+	/**/
+	/*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
+	/*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
+	/**/
+	/*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
+	/*vertical scale ratio and the number of vertical taps increased by one.  add one more for possible odd line*/
+	/*panning/bezel adjustment mode.*/
+	/**/
+	/*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
+	/*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
+	/*furthermore, there is only one line time for initialization.*/
+	/**/
+	/*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
+	/*the ceiling of the vertical scale ratio.*/
+	/**/
+	/*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.'*/
+	/**/
+	/*the horizontal blank and chunk granularity factor is indirectly used indicate the interval of time required to transfer the source pixels.*/
+	/*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
+	/*it applies when the lines in per line out is not 2 or 4.  it does not apply when there is a line buffer between the scl and blnd.*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
+			if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
+				data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
+			}
+			if (data->stereo_mode[i] == bw_def_top_bottom) {
+				v_filter_init_mode[i] = bw_def_manual;
+				data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
+			}
+			else {
+				v_filter_init_mode[i] = bw_def_auto;
+			}
+			if (data->stereo_mode[i] == bw_def_top_bottom) {
+				data->num_lines_at_frame_start = bw_int_to_fixed(1);
+			}
+			else {
+				data->num_lines_at_frame_start = bw_int_to_fixed(3);
+			}
+			if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
+				data->line_buffer_prefetch[i] = 0;
+			}
+			else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
+				data->line_buffer_prefetch[i] = 1;
+			}
+			else {
+				data->line_buffer_prefetch[i] = 0;
+			}
+			data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
+			if (data->line_buffer_prefetch[i] == 1) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
+			}
+			else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
+			} else if (bw_leq(data->vsr[i],
+					bw_frc_to_fixed(4, 3))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
+			} else if (bw_leq(data->vsr[i],
+					bw_frc_to_fixed(6, 4))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
+			}
+			else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
+			}
+			else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
+			}
+			else {
+				data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
+			}
+			if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
+				data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
+			}
+			else {
+				data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
+			}
+			data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
+			data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
+		}
+	}
+	/*outstanding chunk request limit*/
+	/*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
+	/*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
+	/**/
+	/*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
+	/**/
+	/*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
+	/*and underlay.*/
+	/**/
+	/*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
+	/*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
+	/*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
+	/*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
+	/*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
+	/*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
+				data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
+			}
+			else {
+				data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
+			}
+		}
+		if (data->fbc_en[i] == 1) {
+			max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			switch (surface_type[i]) {
+			case bw_def_display_write_back420_luma:
+				data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
+				break;
+			case bw_def_display_write_back420_chroma:
+				data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
+				break;
+			case bw_def_underlay420_luma:
+				data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+				break;
+			case bw_def_underlay420_chroma:
+				data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
+				break;
+			case bw_def_underlay422:case bw_def_underlay444:
+				if (data->orthogonal_rotation[i] == 0) {
+					data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+				}
+				else {
+					data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
+				}
+				break;
+			default:
+				if (data->fbc_en[i] == 1) {
+					/*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
+					if (data->number_of_displays == 1) {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+					}
+					else {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+					}
+				}
+				else {
+					/*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
+					if (data->number_of_displays == 1) {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+					}
+					else {
+						data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+					}
+				}
+				break;
+			}
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+				data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+			}
+			else {
+				data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
+				data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+		}
+	}
+	data->min_dmif_size_in_time = bw_int_to_fixed(9999);
+	data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
+					data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+				}
+			}
+			else {
+				if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
+					data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+				}
+			}
+		}
+	}
+	data->total_requests_for_dmif_size = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+			data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
+				data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
+			}
+			else {
+				data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
+				/*set maximum chunk limit if only one graphic pipe is enabled*/
+				data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
+			}
+			else {
+				data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
+				/*clamp maximum chunk limit in the graphic display pipe*/
+				if ((i >= 4)) {
+					data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
+				}
+			}
+		}
+	}
+	/*outstanding pte request limit*/
+	/*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
+	/*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
+	/*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
+	/*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
+	/*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
+	/*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
+	/*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
+	/*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/
+	if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
+		data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+	}
+	else {
+		data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+			if (tiling_mode[i] == bw_def_linear) {
+				data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+				data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
+				data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
+				data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
+				data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+			}
+			else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
+				data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+				switch (data->bytes_per_pixel[i]) {
+				case 4:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+					break;
+				case 2:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+					break;
+				default:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+					break;
+				}
+				data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+				data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+			}
+			else {
+				data->useful_pte_per_pte_request = bw_int_to_fixed(1);
+				switch (data->bytes_per_pixel[i]) {
+				case 4:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+					break;
+				case 2:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+					break;
+				default:
+					data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+					data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+					break;
+				}
+				data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+				data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+			}
+			data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
+			data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
+			data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
+			if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
+				data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
+			}
+			else {
+				data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
+			}
+		}
+	}
+	/*pitch padding recommended for efficiency in linear mode*/
+	/*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
+	/*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
+	data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
+
+	/*pixel transfer time*/
+	/*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/
+	/*for dmif, pte and cursor requests have to be included.*/
+	/*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
+	/*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
+	/*the page close-open time is determined by trc and the number of page close-opens*/
+	/*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
+	/*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
+	/*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/
+	/*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
+	/*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
+	/*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
+	/*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
+	/*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
+	/*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
+	/*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the fist pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
+	/*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/
+	/*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
+	/*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
+	data->cursor_total_data = bw_int_to_fixed(0);
+	data->cursor_total_request_groups = bw_int_to_fixed(0);
+	data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
+	data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
+			if (dceip->large_cursor == 1) {
+				data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
+			}
+			else {
+				data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
+			}
+			if (data->scatter_gather_enable_for_pipe[i]) {
+				data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
+				data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
+			}
+		}
+	}
+	data->tile_width_in_pixels = bw_int_to_fixed(8);
+	data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+	data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
+				data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
+			}
+			else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
+				data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
+			}
+			else {
+				data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
+			}
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+			}
+			else {
+				data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+			}
+		}
+	}
+	data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
+	data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+		}
+	}
+	data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
+			}
+		}
+	}
+	data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+	data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
+	data->total_display_reads_required_data = bw_int_to_fixed(0);
+	data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
+	data->total_display_writes_required_data = bw_int_to_fixed(0);
+	data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
+				/*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width.  each*/
+				/*pseudo-channel may be read independently of one another.*/
+				/*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
+				/*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
+				/*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
+				/*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
+				/*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
+				/*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
+				/*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
+				/*the memory efficiency will be 50% for the 32 byte sized data.*/
+				if (vbios->memory_type == bw_def_hbm) {
+					data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
+				}
+				else {
+					data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
+				}
+				data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
+				data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
+			}
+			else {
+				data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
+				data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
+			}
+		}
+	}
+	data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+	data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
+				data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
+			}
+			else {
+				if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
+					data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
+				}
+				else {
+					data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
+				}
+			}
+			data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+			data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
+			data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+			data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
+		}
+	}
+	for (i = 0; i <= 2; i++) {
+		for (j = 0; j <= 7; j++) {
+			data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+			if (data->d1_display_write_back_dwb_enable == 1) {
+				data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		for (j = 0; j <= 2; j++) {
+			for (k = 0; k <= 7; k++) {
+				if (data->enable[i]) {
+					if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+						/*time to transfer data from the dmif buffer to the lb.  since the mc to dmif transfer time overlaps*/
+						/*with the dmif to lb transfer time, only time to transfer the last chunk  is considered.*/
+						data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
+						data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
+						/*during an mclk switch the requests from the dce ip are stored in the gmc/arb.  these requests should be serviced immediately*/
+						/*after the mclk switch sequence and not incur an urgent latency penalty.  it is assumed that the gmc/arb can hold up to 256 requests*/
+						/*per memory channel.  if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+						/*immediately serviced without a gap in the urgent requests.*/
+						/*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+						if (surface_type[i] == bw_def_graphics) {
+							switch (data->lb_bpc[i]) {
+							case 6:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+								break;
+							case 8:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+								break;
+							case 10:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+								break;
+							default:
+								data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+								break;
+							}
+							if (data->use_alpha[i] == 1) {
+								data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+							}
+						}
+						else {
+							switch (data->lb_bpc[i]) {
+							case 6:
+								data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+								break;
+							case 8:
+								data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+								break;
+							case 10:
+								data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+								break;
+							default:
+								data->v_scaler_efficiency = bw_int_to_fixed(3);
+								break;
+							}
+						}
+						if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+							data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+						}
+						else {
+							data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+						}
+						data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
+					}
+					else {
+						data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
+						/*during an mclk switch the requests from the dce ip are stored in the gmc/arb.  these requests should be serviced immediately*/
+						/*after the mclk switch sequence and not incur an urgent latency penalty.  it is assumed that the gmc/arb can hold up to 256 requests*/
+						/*per memory channel.  if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+						/*immediately serviced without a gap in the urgent requests.*/
+						/*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+						data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
+					}
+				}
+			}
+		}
+	}
+	/*cpu c-state and p-state change enable*/
+	/*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
+	/*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
+	/*condition for the blackout duration:*/
+	/* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
+	/*condition for the blackout recovery:*/
+	/* recovery time >  dmif burst time + 2 * urgent latency*/
+	/* recovery time > (display bw * blackout duration  + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
+	/*                  / (dispclk - display bw)*/
+	/*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
+	/*the minimum latency hiding is  further limited by the cursor.  the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
+				if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
+					data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+				}
+				else {
+					data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+				}
+			}
+			else {
+				data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
+				data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+			}
+			else {
+				data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+			}
+			data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
+		}
+	}
+	for (i = 0; i <= 2; i++) {
+		for (j = 0; j <= 7; j++) {
+			data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
+			data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
+			data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
+			for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+				if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
+					if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+						data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+						data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
+						if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+						}
+						else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+						}
+					}
+					else {
+						data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+						data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+						if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+						}
+						else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+							data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+						}
+					}
+				}
+			}
+		}
+	}
+	if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
+		data->cpup_state_change_enable = bw_def_yes;
+		if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
+			data->cpuc_state_change_enable = bw_def_yes;
+		}
+		else {
+			data->cpuc_state_change_enable = bw_def_no;
+		}
+	}
+	else {
+		data->cpup_state_change_enable = bw_def_no;
+		data->cpuc_state_change_enable = bw_def_no;
+	}
+	/*nb p-state change enable*/
+	/*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
+	/*below the maximum.*/
+	/*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
+	/*minus the dmif burst time, minus the source line transfer time*/
+	/*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/
+	/*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+			}
+			else {
+				/*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
+				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+			}
+			data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
+		}
+	}
+	/*initialize variables*/
+	number_of_displays_enabled = 0;
+	number_of_displays_enabled_with_margin = 0;
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if (data->enable[k]) {
+			number_of_displays_enabled = number_of_displays_enabled + 1;
+		}
+		data->display_pstate_change_enable[k] = 0;
+	}
+	for (i = 0; i <= 2; i++) {
+		for (j = 0; j <= 7; j++) {
+			data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
+			data->dram_speed_change_margin = bw_int_to_fixed(9999);
+			data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
+			data->num_displays_with_margin[i][j] = 0;
+			for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+				if (data->enable[k]) {
+					if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+						data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+						if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+							/*determine the minimum dram clock change margin for each set of clock frequencies*/
+							data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+							/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+							data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+							if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+								data->display_pstate_change_enable[k] = 1;
+								data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+							}
+						}
+					}
+					else {
+						data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+						if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+							/*determine the minimum dram clock change margin for each display pipe*/
+							data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+							/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+							data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+							if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+								data->display_pstate_change_enable[k] = 1;
+								data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	/*determine the number of displays with margin to switch in the v_active region*/
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
+			number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
+		}
+	}
+	/*determine the number of displays that don't have any dram clock change margin, but*/
+	/*have the same resolution.  these displays can switch in a common vblank region if*/
+	/*their frames are aligned.*/
+	data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if (data->enable[k]) {
+			if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+				data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+				data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+			}
+			else {
+				data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+				data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+			}
+		}
+	}
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		data->displays_with_same_mode[i] = bw_int_to_fixed(0);
+		if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
+			for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
+				if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+					data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
+				}
+			}
+		}
+	}
+	/*compute the maximum number of aligned displays with no margin*/
+	number_of_aligned_displays_with_no_margin = 0;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
+	}
+	/*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
+	/*aligned displays with the same timing.*/
+	/*the display(s) with the negative margin can be switched in the v_blank region while the other*/
+	/*displays are in v_blank or v_active.*/
+	if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+		data->nbp_state_change_enable = bw_def_yes;
+	}
+	else {
+		data->nbp_state_change_enable = bw_def_no;
+	}
+	/*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
+	if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
+		nbp_state_change_enable_blank = bw_def_yes;
+	}
+	else {
+		nbp_state_change_enable_blank = bw_def_no;
+	}
+	/*required yclk(pclk)*/
+	/*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
+	/*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
+	/*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
+	data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
+	/* number of cursor lines stored in the cursor data return buffer*/
+	num_cursor_lines = 0;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
+				/*compute number of cursor lines stored in data return buffer*/
+				if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
+					num_cursor_lines = 4;
+				}
+				else {
+					num_cursor_lines = 2;
+				}
+				data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
+			}
+		}
+	}
+	/*compute minimum time to read one chunk from the dmif buffer*/
+	if ((number_of_displays_enabled > 2)) {
+		data->chunk_request_delay = 0;
+	}
+	else {
+		data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
+	}
+	data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
+	data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
+	data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
+	data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
+	data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
+	data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
+	data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
+	if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+		data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+		yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+		data->y_clk_level = high;
+		data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+	}
+	else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+		data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+		yclk_message = bw_def_exceeded_allowed_page_close_open;
+		data->y_clk_level = high;
+		data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+	}
+	else {
+		data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
+		if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+			yclk_message = bw_fixed_to_int(vbios->low_yclk);
+			data->y_clk_level = low;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+		else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+			yclk_message = bw_fixed_to_int(vbios->mid_yclk);
+			data->y_clk_level = mid;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+		else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+			yclk_message = bw_fixed_to_int(vbios->high_yclk);
+			data->y_clk_level = high;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+		else {
+			yclk_message = bw_def_exceeded_allowed_maximum_bw;
+			data->y_clk_level = high;
+			data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+		}
+	}
+	/*required sclk*/
+	/*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queue size*/
+	/*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
+	/*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
+	/*for dmif, pte and cursor requests have to be included.*/
+	data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+	data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+	if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+		data->required_sclk = bw_int_to_fixed(9999);
+		sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+		data->sclk_level = s_high;
+	}
+	else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+		data->required_sclk = bw_int_to_fixed(9999);
+		sclk_message = bw_def_exceeded_allowed_page_close_open;
+		data->sclk_level = s_high;
+	}
+	else {
+		data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
+		if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_low;
+			data->sclk_level = s_low;
+			data->required_sclk = vbios->low_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid1;
+			data->required_sclk = vbios->mid1_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid2;
+			data->required_sclk = vbios->mid2_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid3;
+			data->required_sclk = vbios->mid3_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid4;
+			data->required_sclk = vbios->mid4_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid5;
+			data->required_sclk = vbios->mid5_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+			sclk_message = bw_def_mid;
+			data->sclk_level = s_mid6;
+			data->required_sclk = vbios->mid6_sclk;
+		}
+		else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+			sclk_message = bw_def_high;
+			data->sclk_level = s_high;
+			data->required_sclk = vbios->high_sclk;
+		}
+		else {
+			sclk_message = bw_def_exceeded_allowed_maximum_sclk;
+			data->sclk_level = s_high;
+			/*required_sclk = high_sclk*/
+		}
+	}
+	/*dispclk*/
+	/*if dispclk is set to the maximum, ramping is not required.  dispclk required without ramping is less than the dispclk required with ramping.*/
+	/*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
+	/*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
+	/*if that does not happen either, dispclk required is the dispclk required with ramping.*/
+	/*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
+	/*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
+	/*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time.  it applies when the lines in per line out is not 2 or 4.*/
+	/*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
+	/*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
+	/*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
+	/*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
+	/*the scaling limits factor itself it also clamped to at least 1*/
+	/*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
+	data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] == bw_def_graphics) {
+				switch (data->lb_bpc[i]) {
+				case 6:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+					break;
+				case 8:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+					break;
+				case 10:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+					break;
+				default:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+					break;
+				}
+				if (data->use_alpha[i] == 1) {
+					data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+				}
+			}
+			else {
+				switch (data->lb_bpc[i]) {
+				case 6:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+					break;
+				case 8:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+					break;
+				case 10:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+					break;
+				default:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
+					break;
+				}
+			}
+			if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+				data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+			}
+			else {
+				data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+			}
+			data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
+			data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
+			data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
+		}
+	}
+	data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
+	data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
+				data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
+			}
+			if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
+				data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
+			}
+		}
+	}
+	data->total_read_request_bandwidth = bw_int_to_fixed(0);
+	data->total_write_request_bandwidth = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
+			}
+			else {
+				data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
+			}
+		}
+	}
+	data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
+	data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+	data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+	if (data->cpuc_state_change_enable == bw_def_yes) {
+		data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+		data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+	}
+	if (data->cpup_state_change_enable == bw_def_yes) {
+		data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+		data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+	}
+	if (data->nbp_state_change_enable == bw_def_yes) {
+		data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+		data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+	}
+	if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+		data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
+	}
+	else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+		data->dispclk = vbios->high_voltage_max_dispclk;
+	}
+	else {
+		data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
+	}
+	/* required core voltage*/
+	/* the core voltage required is low if sclk, yclk(pclk)and dispclk are within the low limits*/
+	/* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/
+	/* otherwise, the core voltage required is high if the three clocks are within the high limits*/
+	/* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
+	if (pipe_check == bw_def_notok) {
+		voltage = bw_def_na;
+	}
+	else if (mode_check == bw_def_notok) {
+		voltage = bw_def_notok;
+	}
+	else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
+		voltage = bw_def_0_72;
+	}
+	else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
+		voltage = bw_def_0_8;
+	}
+	else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
+		if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
+			voltage = bw_def_high_no_nbp_state_change;
+		}
+		else {
+			voltage = bw_def_0_9;
+		}
+	}
+	else {
+		voltage = bw_def_notok;
+	}
+	if (voltage == bw_def_0_72) {
+		data->max_phyclk = vbios->low_voltage_max_phyclk;
+	}
+	else if (voltage == bw_def_0_8) {
+		data->max_phyclk = vbios->mid_voltage_max_phyclk;
+	}
+	else {
+		data->max_phyclk = vbios->high_voltage_max_phyclk;
+	}
+	/*required blackout recovery time*/
+	data->blackout_recovery_time = bw_int_to_fixed(0);
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
+			if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+				data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
+				if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
+					data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+				}
+			}
+			else {
+				data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
+				if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
+					data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+				}
+			}
+		}
+	}
+	/*sclk deep sleep*/
+	/*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but should not be set to less than the request bandwidth.*/
+	/*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
+	/*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
+	/*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8bbp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
+	/*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+			}
+			else if (surface_type[i] == bw_def_graphics) {
+				data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+			else if (data->orthogonal_rotation[i] == 0) {
+				data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+			}
+			else {
+				data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+		}
+	}
+	data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
+				data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
+			}
+		}
+	}
+	data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
+	/*urgent, stutter and nb-p_state watermark*/
+	/*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
+	/*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.  it does not apply to the writeback.*/
+	/*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
+	/*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
+	/*blackout_duration is added to the urgent watermark*/
+	data->chunk_request_time = bw_int_to_fixed(0);
+	data->cursor_request_time = bw_int_to_fixed(0);
+	/*compute total time to request one chunk from each active display pipe*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
+		}
+	}
+	/*compute total time to request cursor data*/
+	data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
+			if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+				data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+				data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+				data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+				/*unconditionally remove black out time from the nb p_state watermark*/
+				if ((data->display_pstate_change_enable[i] == 1)) {
+					data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+				}
+				else {
+					/*maximize the watermark to force the switch in the v_blank region of the frame*/
+					data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+				}
+			}
+			else {
+				data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+				data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
+				data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
+				if ((data->display_pstate_change_enable[i] == 1)) {
+					data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+				}
+				else {
+					/*maximize the watermark to force the switch in the v_blank region of the frame*/
+					data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+				}
+			}
+		}
+	}
+	/*stutter mode enable*/
+	/*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
+	/*display pipe.*/
+	data->stutter_mode_enable = data->cpuc_state_change_enable;
+	if (data->number_of_displays > 1) {
+		for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+			if (data->enable[i]) {
+				if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
+					data->stutter_mode_enable = bw_def_no;
+				}
+			}
+		}
+	}
+	/*performance metrics*/
+	/* display read access efficiency (%)*/
+	/* display write back access efficiency (%)*/
+	/* stutter efficiency (%)*/
+	/* extra underlay pitch recommended for efficiency (pixels)*/
+	/* immediate flip time (us)*/
+	/* latency for other clients due to urgent display read (us)*/
+	/* latency for other clients due to urgent display write (us)*/
+	/* average bandwidth consumed by display (no compression) (gb/s)*/
+	/* required dram  bandwidth (gb/s)*/
+	/* required sclk (m_hz)*/
+	/* required rd urgent latency (us)*/
+	/* nb p-state change margin (us)*/
+	/*dmif and mcifwr dram access efficiency*/
+	/*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time.  but it cannot exceed the dram efficiency provided by the memory subsystem*/
+	data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
+	if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
+		data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
+	}
+	else {
+		data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
+	}
+	/*average bandwidth*/
+	/*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
+	/*the average bandwidth with compression is the same, divided by the compression ratio*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+			data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+		}
+	}
+	data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+	data->total_average_bandwidth = bw_int_to_fixed(0);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+			data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+		}
+	}
+	/*stutter efficiency*/
+	/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration.  only applies if the display write-back is not enabled.*/
+	/*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
+	/*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
+	/*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
+	/*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
+	/*compute the time read all the data from the dmif buffer to the lb (dram refresh period)*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
+			data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
+		}
+	}
+	data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
+	data->total_stutter_dmif_buffer_size = 0;
+	data->total_bytes_requested = 0;
+	data->min_stutter_dmif_buffer_size = 9999;
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
+				data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
+				data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
+				data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
+			}
+			data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
+		}
+	}
+	data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+	data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
+	data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
+	data->time_in_self_refresh = data->min_stutter_refresh_duration;
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->stutter_efficiency = bw_int_to_fixed(0);
+	}
+	else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
+		data->stutter_efficiency = bw_int_to_fixed(0);
+	}
+	else {
+		/*compute stutter efficiency assuming 60 hz refresh rate*/
+		data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
+	}
+	/*immediate flip time*/
+	/*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
+	/*otherwise, it may take just one urgent memory trip*/
+	data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+			data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
+			if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
+				data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
+			}
+		}
+	}
+	data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
+	/*worst latency for other clients*/
+	/*it is the urgent latency plus the urgent burst time*/
+	data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
+	}
+	else {
+		data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
+	}
+	/*dmif mc urgent latency supported in high sclk and yclk*/
+	data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
+	/*dram speed/p-state change margin*/
+	/*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
+	data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+	data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+			data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+		}
+	}
+	/*sclk required vs urgent latency*/
+	for (i = 1; i <= 5; i++) {
+		data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
+		if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
+			data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+		}
+		else {
+			data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
+		}
+	}
+	/*output link bit per pixel supported*/
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		data->output_bpphdmi[k] = bw_def_na;
+		data->output_bppdp4_lane_hbr[k] = bw_def_na;
+		data->output_bppdp4_lane_hbr2[k] = bw_def_na;
+		data->output_bppdp4_lane_hbr3[k] = bw_def_na;
+		if (data->enable[k]) {
+			data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
+			if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
+				data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+			}
+			if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
+				data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+			}
+			if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
+				data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+			}
+		}
+	}
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+	struct bw_calcs_vbios *bw_vbios,
+	struct hw_asic_id asic_id)
+{
+	struct bw_calcs_dceip dceip = { 0 };
+	struct bw_calcs_vbios vbios = { 0 };
+
+	enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
+
+	dceip.version = version;
+
+	switch (version) {
+	case BW_CALCS_VERSION_CARRIZO:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 64;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(1600);
+		vbios.mid_yclk = bw_int_to_fixed(1600);
+		vbios.low_yclk = bw_frc_to_fixed(66666, 100);
+		vbios.low_sclk = bw_int_to_fixed(200);
+		vbios.mid1_sclk = bw_int_to_fixed(300);
+		vbios.mid2_sclk = bw_int_to_fixed(300);
+		vbios.mid3_sclk = bw_int_to_fixed(300);
+		vbios.mid4_sclk = bw_int_to_fixed(300);
+		vbios.mid5_sclk = bw_int_to_fixed(300);
+		vbios.mid6_sclk = bw_int_to_fixed(300);
+		vbios.high_sclk = bw_frc_to_fixed(62609, 100);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(50);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 3;
+		dceip.number_of_underlay_pipes = 1;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = false;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 2;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(0);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+		break;
+	case BW_CALCS_VERSION_POLARIS10:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 32;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(6000);
+		vbios.mid_yclk = bw_int_to_fixed(3200);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(300);
+		vbios.mid1_sclk = bw_int_to_fixed(400);
+		vbios.mid2_sclk = bw_int_to_fixed(500);
+		vbios.mid3_sclk = bw_int_to_fixed(600);
+		vbios.mid4_sclk = bw_int_to_fixed(700);
+		vbios.mid5_sclk = bw_int_to_fixed(800);
+		vbios.mid6_sclk = bw_int_to_fixed(974);
+		vbios.high_sclk = bw_int_to_fixed(1154);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 6;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	case BW_CALCS_VERSION_POLARIS11:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 32;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(6000);
+		vbios.mid_yclk = bw_int_to_fixed(3200);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(300);
+		vbios.mid1_sclk = bw_int_to_fixed(400);
+		vbios.mid2_sclk = bw_int_to_fixed(500);
+		vbios.mid3_sclk = bw_int_to_fixed(600);
+		vbios.mid4_sclk = bw_int_to_fixed(700);
+		vbios.mid5_sclk = bw_int_to_fixed(800);
+		vbios.mid6_sclk = bw_int_to_fixed(974);
+		vbios.high_sclk = bw_int_to_fixed(1154);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		if (vbios.number_of_dram_channels == 2) // 64-bit
+			vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		else
+			vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 5;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	case BW_CALCS_VERSION_STONEY:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 64;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(1866);
+		vbios.mid_yclk = bw_int_to_fixed(1866);
+		vbios.low_yclk = bw_int_to_fixed(1333);
+		vbios.low_sclk = bw_int_to_fixed(200);
+		vbios.mid1_sclk = bw_int_to_fixed(600);
+		vbios.mid2_sclk = bw_int_to_fixed(600);
+		vbios.mid3_sclk = bw_int_to_fixed(600);
+		vbios.mid4_sclk = bw_int_to_fixed(600);
+		vbios.mid5_sclk = bw_int_to_fixed(600);
+		vbios.mid6_sclk = bw_int_to_fixed(600);
+		vbios.high_sclk = bw_int_to_fixed(800);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(50);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = true;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 2;
+		dceip.number_of_underlay_pipes = 1;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = false;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 2;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(0);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	case BW_CALCS_VERSION_VEGA10:
+		vbios.memory_type = bw_def_hbm;
+		vbios.dram_channel_width_in_bits = 128;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 16;
+		vbios.high_yclk = bw_int_to_fixed(2400);
+		vbios.mid_yclk = bw_int_to_fixed(1700);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(300);
+		vbios.mid1_sclk = bw_int_to_fixed(350);
+		vbios.mid2_sclk = bw_int_to_fixed(400);
+		vbios.mid3_sclk = bw_int_to_fixed(500);
+		vbios.mid4_sclk = bw_int_to_fixed(600);
+		vbios.mid5_sclk = bw_int_to_fixed(700);
+		vbios.mid6_sclk = bw_int_to_fixed(760);
+		vbios.high_sclk = bw_int_to_fixed(776);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+		vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(39);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = false;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = true;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 6;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = true;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 24576;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = false;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
+	default:
+		break;
+	}
+	*bw_dceip = dceip;
+	*bw_vbios = vbios;
+
+}
+
+/**
+ * Check whether the calculated (required) clocks fit within the clocks
+ * available at maximum voltage (max Performance Level).
+ */
+static bool is_display_configuration_supported(
+	const struct bw_calcs_vbios *vbios,
+	const struct dce_bw_output *calcs_output)
+{
+	uint32_t max_clk_khz;
+
+	/* vbios limits are in MHz; calcs_output values are in kHz */
+	max_clk_khz = bw_fixed_to_int(vbios->high_voltage_max_dispclk) * 1000;
+	if (calcs_output->dispclk_khz > max_clk_khz)
+		return false;
+
+	max_clk_khz = bw_fixed_to_int(vbios->high_sclk) * 1000;
+	if (calcs_output->sclk_khz > max_clk_khz)
+		return false;
+
+	return true;
+}
+
+/*
+ * Translate the active pipe configuration into the bw_calcs_data layout
+ * consumed by calculate_bandwidth().
+ *
+ * Index convention (inferred from the indexing below — confirm against the
+ * bw_calcs_data definition): display surfaces occupy slots [num_displays + 4],
+ * while the underlay surfaces belonging to display N occupy slots
+ * [N * 2] and [N * 2 + 1].
+ */
+static void populate_initial_data(
+	const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
+{
+	int i, j;
+	int num_displays = 0;
+
+	data->underlay_surface_type = bw_def_420;
+	data->panning_and_bezel_adjustment = bw_def_none;
+	data->graphics_lb_bpc = 10;
+	data->underlay_lb_bpc = 8;
+	data->underlay_tiling_mode = bw_def_tiled;
+	data->graphics_tiling_mode = bw_def_tiled;
+	data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
+	data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+
+	/* Pipes with underlay first */
+	for (i = 0; i < pipe_count; i++) {
+		if (!pipe[i].stream || !pipe[i].bottom_pipe)
+			continue;
+
+		ASSERT(pipe[i].plane_state);
+
+		/* Underlay blend mode tracked per display (d0/d1 only) */
+		if (num_displays == 0) {
+			if (!pipe[i].plane_state->visible)
+				data->d0_underlay_mode = bw_def_underlay_only;
+			else
+				data->d0_underlay_mode = bw_def_blend;
+		} else {
+			if (!pipe[i].plane_state->visible)
+				data->d1_underlay_mode = bw_def_underlay_only;
+			else
+				data->d1_underlay_mode = bw_def_blend;
+		}
+
+		data->fbc_en[num_displays + 4] = false;
+		data->lpt_en[num_displays + 4] = false;
+		data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+		data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+		data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+		data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+		data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+		data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+		data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+		data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+		data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+		data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+		switch (pipe[i].plane_state->rotation) {
+		case ROTATION_ANGLE_0:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+			break;
+		case ROTATION_ANGLE_90:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+			break;
+		case ROTATION_ANGLE_180:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+			break;
+		case ROTATION_ANGLE_270:
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+			break;
+		default:
+			break;
+		}
+		/*
+		 * NOTE(review): VIDEO_420_YCrCb added for consistency with the
+		 * identical switch in the no-underlay loop below; previously it
+		 * fell through to the 4 bytes/pixel default instead of 2.
+		 */
+		switch (pipe[i].plane_state->format) {
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+		case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+			data->bytes_per_pixel[num_displays + 4] = 2;
+			break;
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+			data->bytes_per_pixel[num_displays + 4] = 4;
+			break;
+		case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+		case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+			data->bytes_per_pixel[num_displays + 4] = 8;
+			break;
+		default:
+			data->bytes_per_pixel[num_displays + 4] = 4;
+			break;
+		}
+		data->interlace_mode[num_displays + 4] = false;
+		data->stereo_mode[num_displays + 4] = bw_def_mono;
+
+		/* Two underlay slots per display: [N*2] and [N*2 + 1] */
+		for (j = 0; j < 2; j++) {
+			data->fbc_en[num_displays * 2 + j] = false;
+			data->lpt_en[num_displays * 2 + j] = false;
+
+			data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height);
+			data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width);
+			data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
+					pipe[i].bottom_pipe->plane_state->plane_size.grph.surface_pitch);
+			data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps);
+			data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps);
+			data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+					pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value);
+			data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+					pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value);
+			switch (pipe[i].bottom_pipe->plane_state->rotation) {
+			case ROTATION_ANGLE_0:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
+				break;
+			case ROTATION_ANGLE_90:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
+				break;
+			case ROTATION_ANGLE_180:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
+				break;
+			case ROTATION_ANGLE_270:
+				data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
+				break;
+			default:
+				break;
+			}
+			data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
+		}
+
+		num_displays++;
+	}
+
+	/* Pipes without underlay after */
+	for (i = 0; i < pipe_count; i++) {
+		if (!pipe[i].stream || pipe[i].bottom_pipe)
+			continue;
+
+
+		data->fbc_en[num_displays + 4] = false;
+		data->lpt_en[num_displays + 4] = false;
+		data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+		data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+		data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+		if (pipe[i].plane_state) {
+			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+			data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+			data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+			data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+			data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+			data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+			data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+			switch (pipe[i].plane_state->rotation) {
+			case ROTATION_ANGLE_0:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+				break;
+			case ROTATION_ANGLE_90:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+				break;
+			case ROTATION_ANGLE_180:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+				break;
+			case ROTATION_ANGLE_270:
+				data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+				break;
+			default:
+				break;
+			}
+			switch (pipe[i].plane_state->format) {
+			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+			case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+				data->bytes_per_pixel[num_displays + 4] = 2;
+				break;
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+				data->bytes_per_pixel[num_displays + 4] = 4;
+				break;
+			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+			case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+				data->bytes_per_pixel[num_displays + 4] = 8;
+				break;
+			default:
+				data->bytes_per_pixel[num_displays + 4] = 4;
+				break;
+			}
+		} else {
+			/* No plane: fall back to the stream's addressable timing */
+			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
+			data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+			data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable);
+			data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
+			data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
+			data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+			data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+			data->bytes_per_pixel[num_displays + 4] = 4;
+		}
+
+		data->interlace_mode[num_displays + 4] = false;
+		data->stereo_mode[num_displays + 4] = bw_def_mono;
+		num_displays++;
+	}
+
+	data->number_of_displays = num_displays;
+}
+
+/**
+ * Return:
+ *	true -	Display(s) configuration supported.
+ *		In this case 'calcs_output' contains data for HW programming
+ *	false - Display(s) configuration not supported (not enough bandwidth).
+ */
+
+bool bw_calcs(struct dc_context *ctx,
+	const struct bw_calcs_dceip *dceip,
+	const struct bw_calcs_vbios *vbios,
+	const struct pipe_ctx pipe[],
+	int pipe_count,
+	struct dce_bw_output *calcs_output)
+{
+	struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data),
+					     GFP_KERNEL);
+	if (!data)
+		return false;
+
+	populate_initial_data(pipe, pipe_count, data);
+
+	/*TODO: this should be taken out calcs output and assigned during timing sync for pplib use*/
+	calcs_output->all_displays_in_sync = false;
+
+	if (data->number_of_displays != 0) {
+		uint8_t yclk_lvl, sclk_lvl;
+		struct bw_fixed high_sclk = vbios->high_sclk;
+		struct bw_fixed mid1_sclk = vbios->mid1_sclk;
+		struct bw_fixed mid2_sclk = vbios->mid2_sclk;
+		struct bw_fixed mid3_sclk = vbios->mid3_sclk;
+		struct bw_fixed mid4_sclk = vbios->mid4_sclk;
+		struct bw_fixed mid5_sclk = vbios->mid5_sclk;
+		struct bw_fixed mid6_sclk = vbios->mid6_sclk;
+		struct bw_fixed low_sclk = vbios->low_sclk;
+		struct bw_fixed high_yclk = vbios->high_yclk;
+		struct bw_fixed mid_yclk = vbios->mid_yclk;
+		struct bw_fixed low_yclk = vbios->low_yclk;
+
+		calculate_bandwidth(dceip, vbios, data);
+
+		yclk_lvl = data->y_clk_level;
+		sclk_lvl = data->sclk_level;
+
+		calcs_output->nbp_state_change_enable =
+			data->nbp_state_change_enable;
+		calcs_output->cpuc_state_change_enable =
+				data->cpuc_state_change_enable;
+		calcs_output->cpup_state_change_enable =
+				data->cpup_state_change_enable;
+		calcs_output->stutter_mode_enable =
+				data->stutter_mode_enable;
+		calcs_output->dispclk_khz =
+			bw_fixed_to_int(bw_mul(data->dispclk,
+					bw_int_to_fixed(1000)));
+		calcs_output->blackout_recovery_time_us =
+			bw_fixed_to_int(data->blackout_recovery_time);
+		calcs_output->sclk_khz =
+			bw_fixed_to_int(bw_mul(data->required_sclk,
+					bw_int_to_fixed(1000)));
+		calcs_output->sclk_deep_sleep_khz =
+			bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
+					bw_int_to_fixed(1000)));
+		if (yclk_lvl == 0)
+			calcs_output->yclk_khz = bw_fixed_to_int(
+				bw_mul(low_yclk, bw_int_to_fixed(1000)));
+		else if (yclk_lvl == 1)
+			calcs_output->yclk_khz = bw_fixed_to_int(
+				bw_mul(mid_yclk, bw_int_to_fixed(1000)));
+		else
+			calcs_output->yclk_khz = bw_fixed_to_int(
+				bw_mul(high_yclk, bw_int_to_fixed(1000)));
+
+		/* units: nanosecond, 16bit storage. */
+
+		calcs_output->nbp_state_change_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->nbp_state_change_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+							nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->nbp_state_change_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->nbp_state_change_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+		calcs_output->stutter_exit_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_exit_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_exit_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_exit_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+		calcs_output->urgent_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->urgent_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->urgent_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->urgent_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[9], bw_int_to_fixed(1000)));
+
+		if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
+			((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+			calculate_bandwidth(dceip, vbios, data);
+
+			calcs_output->nbp_state_change_wm_ns[0].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[4],bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[1].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[2].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->nbp_state_change_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->nbp_state_change_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->nbp_state_change_wm_ns[5].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+			calcs_output->stutter_exit_wm_ns[0].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[1].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[2].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->stutter_exit_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->stutter_exit_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->stutter_exit_wm_ns[5].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+			calcs_output->urgent_wm_ns[0].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[1].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[2].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->urgent_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->urgent_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->urgent_wm_ns[5].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[9], bw_int_to_fixed(1000)));
+
+			((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+			((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+			calculate_bandwidth(dceip, vbios, data);
+
+			calcs_output->nbp_state_change_wm_ns[0].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[1].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[2].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->nbp_state_change_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->nbp_state_change_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->nbp_state_change_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->nbp_state_change_wm_ns[5].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+			calcs_output->stutter_exit_wm_ns[0].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[1].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[2].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->stutter_exit_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->stutter_exit_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->stutter_exit_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->stutter_exit_wm_ns[5].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+			calcs_output->urgent_wm_ns[0].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[1].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[2].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->urgent_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->urgent_wm_ns[3].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->urgent_wm_ns[4].c_mark =
+					bw_fixed_to_int(bw_mul(data->
+						urgent_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->urgent_wm_ns[5].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[9], bw_int_to_fixed(1000)));
+		}
+
+		if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
+			((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
+			((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
+			((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
+		} else {
+			((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+			((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+			((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+		}
+
+		calculate_bandwidth(dceip, vbios, data);
+
+		calcs_output->nbp_state_change_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->nbp_state_change_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->nbp_state_change_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->nbp_state_change_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->nbp_state_change_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->nbp_state_change_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+		calcs_output->stutter_exit_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_exit_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_exit_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_exit_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_exit_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_exit_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+		calcs_output->urgent_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->urgent_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->urgent_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->urgent_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->urgent_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					urgent_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->urgent_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				urgent_watermark[9], bw_int_to_fixed(1000)));
+
+		((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
+		((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
+		((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
+		((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
+		((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
+	} else {
+		calcs_output->nbp_state_change_enable = true;
+		calcs_output->cpuc_state_change_enable = true;
+		calcs_output->cpup_state_change_enable = true;
+		calcs_output->stutter_mode_enable = true;
+		calcs_output->dispclk_khz = 0;
+		calcs_output->sclk_khz = 0;
+	}
+
+	kfree(data);
+
+	return is_display_configuration_supported(vbios, calcs_output);
+}

+ 1899 - 0
drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c

@@ -0,0 +1,1899 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calc_auto.h"
+#include "dcn_calc_math.h"
+
+/*REVISION#250*/
+/*
+ * Compute per-active-plane scaler ratios (h_ratio/v_ratio) and scaler tap
+ * counts (htaps/vtaps, plus chroma taps) from the viewport and recout
+ * dimensions stored in *v.  All arrays are indexed by plane k over
+ * [0, number_of_active_planes).
+ *
+ * NOTE(review): ratios appear to be viewport-pixels per recout-pixel, so a
+ * value > 1.0 means downscaling — confirm against the DCN bandwidth spec.
+ */
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
+{
+	int k;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->allow_different_hratio_vratio == dcn_bw_yes) {
+			/* Independent H and V ratios.  For a rotated (vertical
+			 * scan) surface the viewport width/height swap roles
+			 * relative to the recout. */
+			if (v->source_scan[k] == dcn_bw_hor) {
+				v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k];
+				v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k];
+			}
+			else {
+				v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k];
+				v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k];
+			}
+		}
+		else {
+			/* Ratios forced equal: use the larger (worst-case) of
+			 * the two axis ratios for both directions. */
+			if (v->source_scan[k] == dcn_bw_hor) {
+				v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]);
+			}
+			else {
+				v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]);
+			}
+			v->v_ratio[k] = v->h_ratio[k];
+		}
+		/* Interlaced output consumes two source lines per output line. */
+		if (v->interlace_output[k] == 1.0) {
+			v->v_ratio[k] = 2.0 * v->v_ratio[k];
+		}
+		if ((v->underscan_output[k] == 1.0)) {
+			v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor;
+			v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor;
+		}
+	}
+	/*scaler taps calculation*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		/* Downscale (ratio > 1): need 2*ceil(ratio) taps for quality,
+		 * capped at the HW maximum.  Upscale (< 1): 4 taps.
+		 * 1:1: a single tap (bypass). */
+		if (v->h_ratio[k] > 1.0) {
+			v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0));
+		}
+		else if (v->h_ratio[k] < 1.0) {
+			v->acceptable_quality_hta_ps = 4.0;
+		}
+		else {
+			v->acceptable_quality_hta_ps = 1.0;
+		}
+		/* Caller-supplied override wins over the computed tap count. */
+		if (v->ta_pscalculation == dcn_bw_override) {
+			v->htaps[k] = v->override_hta_ps[k];
+		}
+		else {
+			v->htaps[k] = v->acceptable_quality_hta_ps;
+		}
+		/* Same policy for the vertical axis. */
+		if (v->v_ratio[k] > 1.0) {
+			v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0));
+		}
+		else if (v->v_ratio[k] < 1.0) {
+			v->acceptable_quality_vta_ps = 4.0;
+		}
+		else {
+			v->acceptable_quality_vta_ps = 1.0;
+		}
+		if (v->ta_pscalculation == dcn_bw_override) {
+			v->vtaps[k] = v->override_vta_ps[k];
+		}
+		else {
+			v->vtaps[k] = v->acceptable_quality_vta_ps;
+		}
+		/* RGB formats have no separate chroma plane, so no chroma taps;
+		 * otherwise chroma taps follow the same override/quality rule. */
+		if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+			v->vta_pschroma[k] = 0.0;
+			v->hta_pschroma[k] = 0.0;
+		}
+		else {
+			if (v->ta_pscalculation == dcn_bw_override) {
+				v->vta_pschroma[k] = v->override_vta_pschroma[k];
+				v->hta_pschroma[k] = v->override_hta_pschroma[k];
+			}
+			else {
+				v->vta_pschroma[k] = v->acceptable_quality_vta_ps;
+				v->hta_pschroma[k] = v->acceptable_quality_hta_ps;
+			}
+		}
+	}
+}
+
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
+{
+	int i;
+	int j;
+	int k;
+	/*mode support, voltage state and soc configuration*/
+
+	/*scale ratio support check*/
+
+	v->scale_ratio_support = dcn_bw_yes;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) {
+			v->scale_ratio_support = dcn_bw_no;
+		}
+	}
+	/*source format, pixel format and scan support check*/
+
+	v->source_format_pixel_and_scan_support = dcn_bw_yes;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) {
+			v->source_format_pixel_and_scan_support = dcn_bw_no;
+		}
+	}
+	/*bandwidth support check*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->source_scan[k] == dcn_bw_hor) {
+			v->swath_width_ysingle_dpp[k] = v->viewport_width[k];
+		}
+		else {
+			v->swath_width_ysingle_dpp[k] = v->viewport_height[k];
+		}
+		if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+			v->byte_per_pixel_in_dety[k] = 8.0;
+			v->byte_per_pixel_in_detc[k] = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+			v->byte_per_pixel_in_dety[k] = 4.0;
+			v->byte_per_pixel_in_detc[k] = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+			v->byte_per_pixel_in_dety[k] = 2.0;
+			v->byte_per_pixel_in_detc[k] = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+			v->byte_per_pixel_in_dety[k] = 1.0;
+			v->byte_per_pixel_in_detc[k] = 2.0;
+		}
+		else {
+			v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f;
+			v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f;
+		}
+	}
+	v->total_read_bandwidth_consumed_gbyte_per_second = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]);
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256);
+		}
+		if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 64);
+		}
+		else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256);
+		}
+		else if (v->pte_enable == dcn_bw_yes) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 512);
+		}
+		v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0;
+	}
+	v->total_write_bandwidth_consumed_gbyte_per_second = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+			v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+		}
+		else if (v->output[k] == dcn_bw_writeback) {
+			v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+		}
+		else {
+			v->write_bandwidth[k] = 0.0;
+		}
+		v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0;
+	}
+	v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second;
+	v->dcc_enabled_in_any_plane = dcn_bw_no;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->dcc_enabled_in_any_plane = dcn_bw_yes;
+		}
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+		v->return_bw_per_state[i] = v->return_bw_todcn_per_state;
+		if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+			v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+		}
+		v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+		if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+			v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+		}
+		v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0);
+		if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+			v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+		}
+		v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+		if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+			v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+		}
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) {
+			v->bandwidth_support[i] = dcn_bw_yes;
+		}
+		else {
+			v->bandwidth_support[i] = dcn_bw_no;
+		}
+	}
+	/*writeback latency support check*/
+
+	v->writeback_latency_support = dcn_bw_yes;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+			v->writeback_latency_support = dcn_bw_no;
+		}
+		else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+			v->writeback_latency_support = dcn_bw_no;
+		}
+	}
+	/*re-ordering buffer support check*/
+
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i];
+		if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) {
+			v->rob_support[i] = dcn_bw_yes;
+		}
+		else {
+			v->rob_support[i] = dcn_bw_no;
+		}
+	}
+	/*display io support check*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) {
+			if (v->output_format[k] == dcn_bw_420) {
+				v->required_output_bw = v->pixel_clock[k] / 2.0;
+			}
+			else {
+				v->required_output_bw = v->pixel_clock[k];
+			}
+		}
+		else if (v->output_format[k] == dcn_bw_420) {
+			v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0;
+		}
+		else {
+			v->required_output_bw = v->pixel_clock[k] * 3.0;
+		}
+		if (v->output[k] == dcn_bw_hdmi) {
+			v->required_phyclk[k] = v->required_output_bw;
+			switch (v->output_deep_color[k]) {
+			case dcn_bw_encoder_10bpc:
+				v->required_phyclk[k] =  v->required_phyclk[k] * 5.0 / 4;
+			break;
+			case dcn_bw_encoder_12bpc:
+				v->required_phyclk[k] =  v->required_phyclk[k] * 3.0 / 2;
+				break;
+			default:
+				break;
+			}
+			v->required_phyclk[k] = v->required_phyclk[k] / 3.0;
+		}
+		else if (v->output[k] == dcn_bw_dp) {
+			v->required_phyclk[k] = v->required_output_bw / 4.0;
+		}
+		else {
+			v->required_phyclk[k] = 0.0;
+		}
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		v->dio_support[i] = dcn_bw_yes;
+		for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+			if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) {
+				v->dio_support[i] = dcn_bw_no;
+			}
+		}
+	}
+	/*total available writeback support check*/
+
+	v->total_number_of_active_writeback = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->output[k] == dcn_bw_writeback) {
+			v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0;
+		}
+	}
+	if (v->total_number_of_active_writeback <= v->max_num_writeback) {
+		v->total_available_writeback_support = dcn_bw_yes;
+	}
+	else {
+		v->total_available_writeback_support = dcn_bw_no;
+	}
+	/*maximum dispclk/dppclk support check*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->h_ratio[k] > 1.0) {
+			v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+		}
+		else {
+			v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+		}
+		if (v->byte_per_pixel_in_detc[k] == 0.0) {
+			v->pscl_factor_chroma[k] = 0.0;
+			v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0);
+		}
+		else {
+			if (v->h_ratio[k] / 2.0 > 1.0) {
+				v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+			}
+			else {
+				v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+			}
+			v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0);
+		}
+	}
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->read256_block_height_y[k] = 1.0;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+				v->read256_block_height_y[k] = 4.0;
+			}
+			else {
+				v->read256_block_height_y[k] = 8.0;
+			}
+			v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+			v->read256_block_height_c[k] = 0.0;
+			v->read256_block_width_c[k] = 0.0;
+		}
+		else {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->read256_block_height_y[k] = 1.0;
+				v->read256_block_height_c[k] = 1.0;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+				v->read256_block_height_y[k] = 16.0;
+				v->read256_block_height_c[k] = 8.0;
+			}
+			else {
+				v->read256_block_height_y[k] = 8.0;
+				v->read256_block_height_c[k] = 8.0;
+			}
+			v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+			v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k];
+		}
+		if (v->source_scan[k] == dcn_bw_hor) {
+			v->max_swath_height_y[k] = v->read256_block_height_y[k];
+			v->max_swath_height_c[k] = v->read256_block_height_c[k];
+		}
+		else {
+			v->max_swath_height_y[k] = v->read256_block_width_y[k];
+			v->max_swath_height_c[k] = v->read256_block_width_c[k];
+		}
+		if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+				v->min_swath_height_y[k] = v->max_swath_height_y[k];
+			}
+			else {
+				v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+			}
+			v->min_swath_height_c[k] = v->max_swath_height_c[k];
+		}
+		else {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->min_swath_height_y[k] = v->max_swath_height_y[k];
+				v->min_swath_height_c[k] = v->max_swath_height_c[k];
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+				v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+				if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+					v->min_swath_height_c[k] = v->max_swath_height_c[k];
+				}
+				else {
+					v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+				}
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+				v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+				if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+					v->min_swath_height_y[k] = v->max_swath_height_y[k];
+				}
+				else {
+					v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+				}
+			}
+			else {
+				v->min_swath_height_y[k] = v->max_swath_height_y[k];
+				v->min_swath_height_c[k] = v->max_swath_height_c[k];
+			}
+		}
+		if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+			v->maximum_swath_width = 8192.0;
+		}
+		else {
+			v->maximum_swath_width = 5120.0;
+		}
+		v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0);
+		if (v->byte_per_pixel_in_detc[k] == 0.0) {
+			v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0);
+		}
+		else {
+			v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0));
+		}
+		v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size);
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		for (j = 0; j <= 1; j++) {
+			v->total_number_of_active_dpp[i][j] = 0.0;
+			v->required_dispclk[i][j] = 0.0;
+			v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+				if (v->odm_capability == dcn_bw_yes) {
+					v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+				}
+				else {
+					v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+				}
+				if (i < number_of_states) {
+					v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+					v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+				}
+				if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+					v->no_of_dpp[i][j][k] = 1.0;
+					v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+				}
+				else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+					v->no_of_dpp[i][j][k] = 2.0;
+					v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+				}
+				else {
+					v->no_of_dpp[i][j][k] = 2.0;
+					v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+					v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+				}
+				v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+			}
+			if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) {
+				v->total_number_of_active_dpp[i][j] = 0.0;
+				v->required_dispclk[i][j] = 0.0;
+				v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+				for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+					v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+					v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+					if (i < number_of_states) {
+						v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+						v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+					}
+					if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+						v->no_of_dpp[i][j][k] = 1.0;
+						v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+						if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+							v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+						}
+					}
+					else {
+						v->no_of_dpp[i][j][k] = 2.0;
+						v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+						if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+							v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+						}
+					}
+					v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+				}
+			}
+		}
+	}
+	/*viewport size check*/
+
+	v->viewport_size_support = dcn_bw_yes;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) {
+			v->viewport_size_support = dcn_bw_no;
+		}
+	}
+	/*total available pipes support check*/
+
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		for (j = 0; j <= 1; j++) {
+			if (v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) {
+				v->total_available_pipes_support[i][j] = dcn_bw_yes;
+			}
+			else {
+				v->total_available_pipes_support[i][j] = dcn_bw_no;
+			}
+		}
+	}
+	/*urgent latency support check*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		for (i = 0; i <= number_of_states_plus_one; i++) {
+			for (j = 0; j <= 1; j++) {
+				v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k];
+				v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k];
+				v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k];
+				if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+					v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+				}
+				if (v->max_swath_height_c[k] > 0.0) {
+					v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
+				}
+				v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+				if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+					v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+				}
+				if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+					v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
+					v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k];
+				}
+				else {
+					v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k];
+					v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k];
+				}
+				if (v->byte_per_pixel_in_detc[k] == 0.0) {
+					v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+					v->lines_in_det_chroma = 0.0;
+				}
+				else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) {
+					v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+					v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+				}
+				else {
+					v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+					v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+				}
+				v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+				v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+				v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
+				v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+				if (v->byte_per_pixel_in_detc[k] == 0.0) {
+					v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
+				}
+				else {
+					v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+				}
+			}
+		}
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		for (j = 0; j <= 1; j++) {
+			v->urgent_latency_support[i][j] = dcn_bw_yes;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) {
+					v->urgent_latency_support[i][j] = dcn_bw_no;
+				}
+			}
+		}
+	}
+	/*prefetch check*/
+
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		for (j = 0; j <= 1; j++) {
+			v->total_number_of_dcc_active_dpp[i][j] = 0.0;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->dcc_enable[k] == dcn_bw_yes) {
+					v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+				}
+			}
+		}
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		for (j = 0; j <= 1; j++) {
+			v->projected_dcfclk_deep_sleep = 8.0;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0);
+				if (v->byte_per_pixel_in_detc[k] == 0.0) {
+					if (v->v_ratio[k] <= 1.0) {
+						v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+					}
+					else {
+						v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+					}
+				}
+				else {
+					if (v->v_ratio[k] <= 1.0) {
+						v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+					}
+					else {
+						v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+					}
+					if (v->v_ratio[k] / 2.0 <= 1.0) {
+						v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+					}
+					else {
+						v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j));
+					}
+				}
+			}
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->dcc_enable[k] == dcn_bw_yes) {
+					v->meta_req_height_y = 8.0 * v->read256_block_height_y[k];
+					v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y;
+					v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y;
+					v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y;
+					if (v->pte_enable == dcn_bw_yes) {
+						v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+					}
+					else {
+						v->meta_pte_bytes_per_frame_y = 0.0;
+					}
+					if (v->source_scan[k] == dcn_bw_hor) {
+						v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+					}
+					else {
+						v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+					}
+				}
+				else {
+					v->meta_pte_bytes_per_frame_y = 0.0;
+					v->meta_row_bytes_y = 0.0;
+				}
+				if (v->pte_enable == dcn_bw_yes) {
+					if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+						v->macro_tile_block_size_bytes_y = 256.0;
+						v->macro_tile_block_height_y = 1.0;
+					}
+					else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+						v->macro_tile_block_size_bytes_y = 4096.0;
+						v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k];
+					}
+					else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+						v->macro_tile_block_size_bytes_y = 64.0 * 1024;
+						v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k];
+					}
+					else {
+						v->macro_tile_block_size_bytes_y = 256.0 * 1024;
+						v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k];
+					}
+					if (v->macro_tile_block_size_bytes_y <= 65536.0) {
+						v->data_pte_req_height_y = v->macro_tile_block_height_y;
+					}
+					else {
+						v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k];
+					}
+					v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8;
+					if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+						v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+					}
+					else if (v->source_scan[k] == dcn_bw_hor) {
+						v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+					}
+					else {
+						v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) + 1);
+					}
+				}
+				else {
+					v->dpte_bytes_per_row_y = 0.0;
+				}
+				if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+					if (v->dcc_enable[k] == dcn_bw_yes) {
+						v->meta_req_height_c = 8.0 * v->read256_block_height_c[k];
+						v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c;
+						v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c;
+						v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c;
+						if (v->pte_enable == dcn_bw_yes) {
+							v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+						}
+						else {
+							v->meta_pte_bytes_per_frame_c = 0.0;
+						}
+						if (v->source_scan[k] == dcn_bw_hor) {
+							v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+						}
+						else {
+							v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+						}
+					}
+					else {
+						v->meta_pte_bytes_per_frame_c = 0.0;
+						v->meta_row_bytes_c = 0.0;
+					}
+					if (v->pte_enable == dcn_bw_yes) {
+						if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+							v->macro_tile_block_size_bytes_c = 256.0;
+							v->macro_tile_block_height_c = 1.0;
+						}
+						else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+							v->macro_tile_block_size_bytes_c = 4096.0;
+							v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k];
+						}
+						else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+							v->macro_tile_block_size_bytes_c = 64.0 * 1024;
+							v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k];
+						}
+						else {
+							v->macro_tile_block_size_bytes_c = 256.0 * 1024;
+							v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k];
+						}
+						v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c;
+						if (v->macro_tile_block_size_bytes_c <= 65536.0) {
+							v->data_pte_req_height_c = v->macro_tile_block_height_c;
+						}
+						else {
+							v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k];
+						}
+						v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8;
+						if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+							v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+						}
+						else if (v->source_scan[k] == dcn_bw_hor) {
+							v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+						}
+						else {
+							v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1);
+						}
+					}
+					else {
+						v->dpte_bytes_per_row_c = 0.0;
+					}
+				}
+				else {
+					v->dpte_bytes_per_row_c = 0.0;
+					v->meta_pte_bytes_per_frame_c = 0.0;
+					v->meta_row_bytes_c = 0.0;
+				}
+				v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c;
+				v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c;
+				v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c;
+				v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0;
+				v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0);
+				v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1;
+				if (v->prefill_y[k] > 1.0) {
+					v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]);
+				}
+				else {
+					v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]);
+				}
+				v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y);
+				v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y;
+				if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+					v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0;
+					v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0);
+					v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1;
+					if (v->prefill_c[k] > 1.0) {
+						v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]);
+					}
+					else {
+						v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]);
+					}
+					v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c);
+					v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c;
+				}
+				else {
+					v->prefetch_lines_c[k] = 0.0;
+				}
+				v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j];
+				if (v->no_of_dpp[i][j][k] > 1.0) {
+					v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+				}
+				if (v->output_format[k] == dcn_bw_420) {
+					v->dst_y_after_scaler = 1.0;
+				}
+				else {
+					v->dst_y_after_scaler = 0.0;
+				}
+				v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
+				v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+				v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
+				v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+				v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+				v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+				v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
+				if (v->pte_enable == dcn_bw_yes) {
+					v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
+				}
+				if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+					v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0;
+				}
+				else {
+					v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0;
+				}
+				v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]);
+				v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4;
+				v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+			}
+			v->bw_available_for_immediate_flip = v->return_bw_per_state[i];
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]);
+			}
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->total_immediate_flip_bytes[k] = 0.0;
+				if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+					v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k];
+				}
+			}
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+					v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+					v->time_for_meta_pte_without_immediate_flip =dcn_bw_max3(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+				}
+				else {
+					v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+					v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+				}
+				if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
+					v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency);
+					v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max3((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency);
+				}
+				else {
+					v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
+					v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency - v->time_for_meta_pte_without_immediate_flip);
+				}
+				v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+				v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+				v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+				v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+				v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k];
+				v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k];
+				if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) {
+					v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+					if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+						if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+							v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+						}
+						else {
+							v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+						}
+					}
+					v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+					if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+						if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+							v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+						}
+						else {
+							v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+						}
+					}
+					v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+				}
+				else {
+					v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+					v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+					v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0;
+				}
+				if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) {
+					v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+					if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+						if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+							v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+						}
+						else {
+							v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+						}
+					}
+					v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+					if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+						if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+							v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+						}
+						else {
+							v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+						}
+					}
+					v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+				}
+				else {
+					v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+					v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+					v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0;
+				}
+			}
+			v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+					v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]));
+				}
+				else {
+					v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+				}
+			}
+			v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+			}
+			v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+			if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) {
+				v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+			}
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) {
+					v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+				}
+			}
+			v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+			if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) {
+				v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+			}
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) {
+					v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+				}
+			}
+		}
+	}
+	for (i = 0; i <= number_of_states_plus_one; i++) {
+		for (j = 0; j <= 1; j++) {
+			v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) {
+					v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+				}
+			}
+			v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) {
+					v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+				}
+			}
+		}
+	}
+	/*mode support, voltage state and soc configuration*/
+
+	for (i = number_of_states_plus_one; i >= 0; i--) {
+		for (j = 0; j <= 1; j++) {
+			if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) {
+				if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) {
+					v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes;
+				}
+				else {
+					v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+				}
+				if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) {
+					v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes;
+				}
+				else {
+					v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+				}
+			}
+			else {
+				v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+				v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+			}
+		}
+	}
+	for (i = number_of_states_plus_one; i >= 0; i--) {
+		if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+			v->voltage_level_with_immediate_flip = i;
+		}
+	}
+	for (i = number_of_states_plus_one; i >= 0; i--) {
+		if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+			v->voltage_level_without_immediate_flip = i;
+		}
+	}
+	if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) {
+		v->immediate_flip_supported = dcn_bw_no;
+		v->voltage_level = v->voltage_level_without_immediate_flip;
+	}
+	else {
+		v->immediate_flip_supported = dcn_bw_yes;
+		v->voltage_level = v->voltage_level_with_immediate_flip;
+	}
+	v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+	v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+	for (j = 0; j <= 1; j++) {
+		v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j];
+		for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+			v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k];
+		}
+		v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j];
+	}
+	v->max_phyclk = v->phyclk_per_state[v->voltage_level];
+}
+/*
+ * Pick between the two DISPCLK:DPPCLK ratio candidates (index j: 0 -> ratio 1,
+ * 1 -> ratio 2) prepared by the preceding mode-support calculation, then derive
+ * the per-plane DPP counts, fetch-block/swath geometry and the DET (detail
+ * buffer) split for the chosen configuration.  All values live in and are
+ * written back to the shared dcn_bw_internal_vars scratch structure.
+ */
+void display_pipe_configuration(struct dcn_bw_internal_vars *v)
+{
+	int j;
+	int k;
+	/*display pipe configuration*/
+
+	/* Total DPPs needed across all active planes for each ratio candidate. */
+	for (j = 0; j <= 1; j++) {
+		v->total_number_of_active_dpp_per_ratio[j] = 0.0;
+		for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+			v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k];
+		}
+	}
+	/*
+	 * Choose ratio 1 (index 0) when it is supported and ratio 2 is not, or
+	 * when both have the same support status and ratio 1 either needs fewer
+	 * DPPs or, at an equal DPP count, requires no more than half of ratio
+	 * 2's DISPCLK.  Otherwise fall back to ratio 2.  The error message
+	 * recorded for the chosen candidate is carried forward.
+	 */
+	if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) {
+		v->dispclk_dppclk_ratio = 1;
+		v->final_error_message = v->error_message[0];
+	}
+	else {
+		v->dispclk_dppclk_ratio = 2;
+		v->final_error_message = v->error_message[1];
+	}
+	/* Per-plane DPP count for the selected ratio (ratio value is 1-based). */
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k];
+	}
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		/*
+		 * Bytes per pixel in the luma (DETY) and chroma (DETC) detail
+		 * buffers per source format; chroma is 0 for the RGB formats.
+		 * The final branch presumably covers yuv420 10bpc (4/3 luma,
+		 * 8/3 chroma) — it is the only other format tested below.
+		 */
+		if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+			v->byte_per_pix_dety = 8.0;
+			v->byte_per_pix_detc = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+			v->byte_per_pix_dety = 4.0;
+			v->byte_per_pix_detc = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+			v->byte_per_pix_dety = 2.0;
+			v->byte_per_pix_detc = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+			v->byte_per_pix_dety = 1.0;
+			v->byte_per_pix_detc = 2.0;
+		}
+		else {
+			v->byte_per_pix_dety = 4.0f / 3.0f;
+			v->byte_per_pix_detc = 8.0f / 3.0f;
+		}
+		/*
+		 * Pixel dimensions of one 256-byte fetch block: linear surfaces
+		 * use height 1; tiled 64bpp uses height 4, other tiled RGB
+		 * height 8; 4:2:0 tiled uses 16/8 (8bpc) or 8/8 (otherwise).
+		 * Width follows from 256 bytes / bytes-per-pixel / height.
+		 */
+		if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->read256_bytes_block_height_y = 1.0;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+				v->read256_bytes_block_height_y = 4.0;
+			}
+			else {
+				v->read256_bytes_block_height_y = 8.0;
+			}
+			v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+			v->read256_bytes_block_height_c = 0.0;
+			v->read256_bytes_block_width_c = 0.0;
+		}
+		else {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->read256_bytes_block_height_y = 1.0;
+				v->read256_bytes_block_height_c = 1.0;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+				v->read256_bytes_block_height_y = 16.0;
+				v->read256_bytes_block_height_c = 8.0;
+			}
+			else {
+				v->read256_bytes_block_height_y = 8.0;
+				v->read256_bytes_block_height_c = 8.0;
+			}
+			v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+			v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c;
+		}
+		/*
+		 * Maximum swath height follows the block height for horizontal
+		 * scans and the block width for vertical (rotated) scans.
+		 */
+		if (v->source_scan[k] == dcn_bw_hor) {
+			v->maximum_swath_height_y = v->read256_bytes_block_height_y;
+			v->maximum_swath_height_c = v->read256_bytes_block_height_c;
+		}
+		else {
+			v->maximum_swath_height_y = v->read256_bytes_block_width_y;
+			v->maximum_swath_height_c = v->read256_bytes_block_width_c;
+		}
+		/*
+		 * Minimum swath height is half the maximum, except where a full
+		 * block must be fetched anyway: linear surfaces, and 64bpp
+		 * standard ('S') swizzles scanned horizontally.  For 4:2:0 the
+		 * halving of luma vs. chroma is additionally gated by the
+		 * "luma and chroma request same size" bug-fixed flag.
+		 */
+		if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+				v->minimum_swath_height_y = v->maximum_swath_height_y;
+			}
+			else {
+				v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+			}
+			v->minimum_swath_height_c = v->maximum_swath_height_c;
+		}
+		else {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->minimum_swath_height_y = v->maximum_swath_height_y;
+				v->minimum_swath_height_c = v->maximum_swath_height_c;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+				v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+				if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+					v->minimum_swath_height_c = v->maximum_swath_height_c;
+				}
+				else {
+					v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+				}
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+				v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+				if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+					v->minimum_swath_height_y = v->maximum_swath_height_y;
+				}
+				else {
+					v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+				}
+			}
+			else {
+				v->minimum_swath_height_y = v->maximum_swath_height_y;
+				v->minimum_swath_height_c = v->maximum_swath_height_c;
+			}
+		}
+		/*
+		 * Swath width is the viewport extent along the scan direction
+		 * divided across the plane's DPPs.
+		 */
+		if (v->source_scan[k] == dcn_bw_hor) {
+			v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k];
+		}
+		else {
+			v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k];
+		}
+		/*
+		 * Worst-case swath size in bytes at maximum swath height,
+		 * rounded up to the request-width granularity; 10bpc 4:2:0 gets
+		 * an extra 256-byte alignment pad.
+		 */
+		v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y;
+		v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y;
+		if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+			v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+		}
+		/*
+		 * NOTE(review): swath_width_granularity_c is only written when
+		 * maximum_swath_height_c > 0, but it is read unconditionally in
+		 * the chroma size formula below.  For RGB formats both
+		 * byte_per_pix_detc and maximum_swath_height_c are 0 so the
+		 * product is 0 regardless, yet the read may observe a stale
+		 * value from a previous plane — confirm this is intended.
+		 */
+		if (v->maximum_swath_height_c > 0.0) {
+			v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
+		}
+		v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+		if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+			v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+		}
+		/*
+		 * Keep the maximum swath heights if both worst-case swaths fit
+		 * in half the DET buffer; otherwise fall back to the minimums.
+		 */
+		if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+			v->swath_height_y[k] = v->maximum_swath_height_y;
+			v->swath_height_c[k] = v->maximum_swath_height_c;
+		}
+		else {
+			v->swath_height_y[k] = v->minimum_swath_height_y;
+			v->swath_height_c[k] = v->minimum_swath_height_c;
+		}
+		/*
+		 * DET allocation: luma-only planes take the whole buffer; when
+		 * the luma swath is no taller than chroma, split 50/50;
+		 * otherwise give luma 2/3 and chroma 1/3.
+		 */
+		if (v->swath_height_c[k] == 0.0) {
+			v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0;
+			v->det_buffer_size_c[k] = 0.0;
+		}
+		else if (v->swath_height_y[k] <= v->swath_height_c[k]) {
+			v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+			v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+		}
+		else {
+			v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0;
+			v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0;
+		}
+	}
+}
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v)
+{
+	int k;
+	/*dispclk and dppclk calculation*/
+
+	v->dispclk_with_ramping = 0.0;
+	v->dispclk_without_ramping = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->h_ratio[k] > 1.0) {
+			v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+		}
+		else {
+			v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+		}
+		v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0);
+		if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+			v->pscl_throughput_chroma[k] = 0.0;
+			v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma;
+		}
+		else {
+			if (v->h_ratio[k] > 1.0) {
+				v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+			}
+			else {
+				v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+			}
+			v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0);
+			v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma);
+		}
+		if (v->odm_capable == dcn_bw_yes) {
+			v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+			v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0));
+		}
+		else {
+			v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+			v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0));
+		}
+	}
+	if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) {
+		v->dispclk = v->dispclk_without_ramping;
+	}
+	else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) {
+		v->dispclk = v->max_dispclk[number_of_states];
+	}
+	else {
+		v->dispclk = v->dispclk_with_ramping;
+	}
+	v->dppclk = v->dispclk / v->dispclk_dppclk_ratio;
+	/*urgent watermark*/
+
+	v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+	v->dcc_enabled_any_plane = dcn_bw_no;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->dcc_enabled_any_plane = dcn_bw_yes;
+		}
+	}
+	v->return_bw = v->return_bandwidth_to_dcn;
+	if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+		v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+	}
+	v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+	if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+		v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+	}
+	v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0);
+	if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+		v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+	}
+	v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+	if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+		v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+	}
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->source_scan[k] == dcn_bw_hor) {
+			v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
+		}
+		else {
+			v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
+		}
+	}
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+			v->byte_per_pixel_dety[k] = 8.0;
+			v->byte_per_pixel_detc[k] = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+			v->byte_per_pixel_dety[k] = 4.0;
+			v->byte_per_pixel_detc[k] = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+			v->byte_per_pixel_dety[k] = 2.0;
+			v->byte_per_pixel_detc[k] = 0.0;
+		}
+		else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+			v->byte_per_pixel_dety[k] = 1.0;
+			v->byte_per_pixel_detc[k] = 2.0;
+		}
+		else {
+			v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
+			v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
+		}
+	}
+	v->total_data_read_bandwidth = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
+		v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
+		v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
+	}
+	v->total_active_dpp = 0.0;
+	v->total_dcc_active_dpp = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k];
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k];
+		}
+	}
+	v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw;
+	v->last_pixel_of_line_extra_watermark = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->v_ratio[k] <= 1.0) {
+			v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+		}
+		else {
+			v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+		}
+		v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+		v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]);
+		if (v->byte_per_pixel_detc[k] == 0.0) {
+			v->display_pipe_line_delivery_time_chroma[k] = 0.0;
+		}
+		else {
+			if (v->v_ratio[k] / 2.0 <= 1.0) {
+				v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k];
+			}
+			else {
+				v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk;
+			}
+			v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+			v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]);
+		}
+	}
+	v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw;
+	if (v->pte_enable == dcn_bw_yes) {
+		v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw;
+	}
+	v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+	v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency;
+	/*nb p-state/dram clock change watermark*/
+
+	v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark;
+	v->total_active_writeback = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->output[k] == dcn_bw_writeback) {
+			v->total_active_writeback = v->total_active_writeback + 1.0;
+		}
+	}
+	if (v->total_active_writeback <= 1.0) {
+		v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency;
+	}
+	else {
+		v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk;
+	}
+	/*stutter efficiency*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k];
+		v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]);
+		v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k];
+		if (v->byte_per_pixel_detc[k] > 0.0) {
+			v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0);
+			v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]);
+			v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0);
+		}
+		else {
+			v->lines_in_detc[k] = 0.0;
+			v->lines_in_detc_rounded_down_to_swath[k] = 0.0;
+			v->full_det_buffering_time_c[k] = 999999.0;
+		}
+	}
+	v->min_full_det_buffering_time = 999999.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) {
+			v->min_full_det_buffering_time = v->full_det_buffering_time_y[k];
+			v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+		}
+		if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) {
+			v->min_full_det_buffering_time = v->full_det_buffering_time_c[k];
+			v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+		}
+	}
+	v->average_read_bandwidth_gbyte_per_second = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0;
+		}
+		else {
+			v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0;
+		}
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0;
+		}
+		if (v->pte_enable == dcn_bw_yes) {
+			v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0;
+		}
+	}
+	v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0));
+	v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0);
+	if (v->total_active_writeback == 0.0) {
+		v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0;
+	}
+	else {
+		v->stutter_efficiency_not_including_vblank = 0.0;
+	}
+	v->smallest_vblank = 999999.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+			v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k];
+		}
+		else {
+			v->v_blank_time = 0.0;
+		}
+		v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time);
+	}
+	v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0;
+	/*dcfclk deep sleep*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->byte_per_pixel_detc[k] > 0.0) {
+			v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]);
+		}
+		else {
+			v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k];
+		}
+		v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0);
+	}
+	v->dcf_clk_deep_sleep = 8.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]);
+	}
+	/*stutter watermark*/
+
+	v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / v->dcf_clk_deep_sleep;
+	v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+	/*urgent latency supported*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]);
+		v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]);
+		if (v->byte_per_pixel_detc[k] > 0.0) {
+			v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]);
+			v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]);
+			v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma);
+		}
+		else {
+			v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma;
+		}
+	}
+	v->min_urgent_latency_support_us = 999999.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]);
+	}
+	/*non-urgent latency tolerance*/
+
+	v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark;
+	/*prefetch*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->block_height256_bytes_y = 1.0;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+				v->block_height256_bytes_y = 4.0;
+			}
+			else {
+				v->block_height256_bytes_y = 8.0;
+			}
+			v->block_height256_bytes_c = 0.0;
+		}
+		else {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->block_height256_bytes_y = 1.0;
+				v->block_height256_bytes_c = 1.0;
+			}
+			else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+				v->block_height256_bytes_y = 16.0;
+				v->block_height256_bytes_c = 8.0;
+			}
+			else {
+				v->block_height256_bytes_y = 8.0;
+				v->block_height256_bytes_c = 8.0;
+			}
+		}
+		if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y);
+			v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y;
+			v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y;
+			if (v->pte_enable == dcn_bw_yes) {
+				v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+			}
+			else {
+				v->meta_pte_bytes_frame_y = 0.0;
+			}
+			if (v->source_scan[k] == dcn_bw_hor) {
+				v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+			}
+			else {
+				v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+			}
+		}
+		else {
+			v->meta_pte_bytes_frame_y = 0.0;
+			v->meta_row_byte_y = 0.0;
+		}
+		if (v->pte_enable == dcn_bw_yes) {
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->macro_tile_size_byte_y = 256.0;
+				v->macro_tile_height_y = 1.0;
+			}
+			else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+				v->macro_tile_size_byte_y = 4096.0;
+				v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y;
+			}
+			else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+				v->macro_tile_size_byte_y = 64.0 * 1024;
+				v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y;
+			}
+			else {
+				v->macro_tile_size_byte_y = 256.0 * 1024;
+				v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y;
+			}
+			if (v->macro_tile_size_byte_y <= 65536.0) {
+				v->pixel_pte_req_height_y = v->macro_tile_height_y;
+			}
+			else {
+				v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y;
+			}
+			v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8;
+			if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+				v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+			}
+			else if (v->source_scan[k] == dcn_bw_hor) {
+				v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+			}
+			else {
+				v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1);
+			}
+		}
+		else {
+			v->pixel_pte_bytes_per_row_y = 0.0;
+		}
+		if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+			if (v->dcc_enable[k] == dcn_bw_yes) {
+				v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c);
+				v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c;
+				v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c;
+				if (v->pte_enable == dcn_bw_yes) {
+					v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+				}
+				else {
+					v->meta_pte_bytes_frame_c = 0.0;
+				}
+				if (v->source_scan[k] == dcn_bw_hor) {
+					v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+				}
+				else {
+					v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+				}
+			}
+			else {
+				v->meta_pte_bytes_frame_c = 0.0;
+				v->meta_row_byte_c = 0.0;
+			}
+			if (v->pte_enable == dcn_bw_yes) {
+				if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+					v->macro_tile_size_bytes_c = 256.0;
+					v->macro_tile_height_c = 1.0;
+				}
+				else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+					v->macro_tile_size_bytes_c = 4096.0;
+					v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c;
+				}
+				else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+					v->macro_tile_size_bytes_c = 64.0 * 1024;
+					v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c;
+				}
+				else {
+					v->macro_tile_size_bytes_c = 256.0 * 1024;
+					v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c;
+				}
+				if (v->macro_tile_size_bytes_c <= 65536.0) {
+					v->pixel_pte_req_height_c = v->macro_tile_height_c;
+				}
+				else {
+					v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c;
+				}
+				v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8;
+				if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+					v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+				}
+				else if (v->source_scan[k] == dcn_bw_hor) {
+					v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+				}
+				else {
+					v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1);
+				}
+			}
+			else {
+				v->pixel_pte_bytes_per_row_c = 0.0;
+			}
+		}
+		else {
+			v->pixel_pte_bytes_per_row_c = 0.0;
+			v->meta_pte_bytes_frame_c = 0.0;
+			v->meta_row_byte_c = 0.0;
+		}
+		v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c;
+		v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c;
+		v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c;
+		v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0);
+		v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1;
+		if (v->v_init_pre_fill_y[k] > 1.0) {
+			v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]);
+		}
+		else {
+			v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]);
+		}
+		v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y);
+		v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y;
+		if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+			v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0);
+			v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1;
+			if (v->v_init_pre_fill_c[k] > 1.0) {
+				v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]);
+			}
+			else {
+				v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]);
+			}
+			v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c);
+		}
+		else {
+			v->max_num_swath_c[k] = 0.0;
+			v->max_partial_swath_c = 0.0;
+		}
+		v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c;
+	}
+	v->t_calc = 24.0 / v->dcf_clk_deep_sleep;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+			v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0;
+		}
+		else {
+			v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0;
+		}
+	}
+	v->next_prefetch_mode = 0.0;
+	do {
+		v->v_startup_lines = 13.0;
+		do {
+			v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes;
+			v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no;
+			v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+			v->v_ratio_prefetch_more_than4 = dcn_bw_no;
+			v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+			v->prefetch_mode = v->next_prefetch_mode;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk;
+				if (v->dpp_per_plane[k] > 1.0) {
+					v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+				}
+				if (v->output_format[k] == dcn_bw_420) {
+					v->dsty_after_scaler = 1.0;
+				}
+				else {
+					v->dsty_after_scaler = 0.0;
+				}
+				v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+				v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
+				v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
+				v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
+				v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k];
+				v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
+				if (v->prefetch_mode == 0.0) {
+					v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
+				}
+				else if (v->prefetch_mode == 1.0) {
+					v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency);
+				}
+				else {
+					v->t_wait = v->urgent_latency;
+				}
+				v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4;
+				if (v->destination_lines_for_prefetch[k] > 0.0) {
+					v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+				}
+				else {
+					v->prefetch_bandwidth[k] = 999999.0;
+				}
+			}
+			v->bandwidth_available_for_immediate_flip = v->return_bw;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]);
+			}
+			v->tot_immediate_flip_bytes = 0.0;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+					v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k];
+				}
+			}
+			v->max_rd_bandwidth = 0.0;
+			for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+				if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+					if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+						v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+					}
+					else {
+						v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+					}
+				}
+				else {
+					v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0;
+				}
+				v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+				if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) {
+					if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+						v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+					}
+					else {
+						v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+					}
+				}
+				else {
+					v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+				}
+				v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+				v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k];
+				if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+					v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data;
+					if ((v->swath_height_y[k] > 4.0)) {
+						if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) {
+							v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0));
+						}
+						else {
+							v->v_ratio_prefetch_y[k] = 999999.0;
+						}
+					}
+				}
+				else {
+					v->v_ratio_prefetch_y[k] = 999999.0;
+				}
+				v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0);
+				if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+					v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data;
+					if ((v->swath_height_c[k] > 4.0)) {
+						if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) {
+							v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0));
+						}
+						else {
+							v->v_ratio_prefetch_c[k] = 999999.0;
+						}
+					}
+				}
+				else {
+					v->v_ratio_prefetch_c[k] = 999999.0;
+				}
+				v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0);
+				if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+					v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]);
+				}
+				else {
+					v->required_prefetch_pix_data_bw = 999999.0;
+				}
+				v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw);
+				if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+					v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k]));
+				}
+				if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+					v->v_ratio_prefetch_more_than4 = dcn_bw_yes;
+				}
+				if (v->destination_lines_for_prefetch[k] < 2.0) {
+					v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+				}
+				if (v->max_vstartup_lines[k] > v->v_startup_lines) {
+					if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) {
+						v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no;
+					}
+					if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+						v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes;
+					}
+					if (v->destination_lines_for_prefetch[k] < 2.0) {
+						v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+					}
+				}
+			}
+			if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) {
+				v->prefetch_mode_supported = dcn_bw_yes;
+			}
+			else {
+				v->prefetch_mode_supported = dcn_bw_no;
+			}
+			v->v_startup_lines = v->v_startup_lines + 1.0;
+		} while (!(v->prefetch_mode_supported == dcn_bw_yes || (v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no)));
+		v->next_prefetch_mode = v->next_prefetch_mode + 1.0;
+	} while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0));
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->v_ratio_prefetch_y[k] <= 1.0) {
+			v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+		}
+		else {
+			v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+		}
+		if (v->byte_per_pixel_detc[k] == 0.0) {
+			v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0;
+		}
+		else {
+			if (v->v_ratio_prefetch_c[k] <= 1.0) {
+				v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+			}
+			else {
+				v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+			}
+		}
+	}
+	/*min ttuv_blank*/
+
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->prefetch_mode == 0.0) {
+			v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes;
+			v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+			v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+		}
+		else if (v->prefetch_mode == 1.0) {
+			v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+			v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+			v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+		}
+		else {
+			v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+			v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no;
+			v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark;
+		}
+	}
+	/*nb p-state/dram clock change support*/
+
+	v->active_dp_ps = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k];
+	}
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+		v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+		v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]);
+		v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]);
+		if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) {
+			v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k];
+		}
+		else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) {
+			v->dpp_output_buffer_lines_y = 0.5;
+		}
+		else {
+			v->dpp_output_buffer_lines_y = 1.0;
+		}
+		if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) {
+			v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0);
+		}
+		else if (v->swath_width_y[k] / 2.0 > v->dpp_output_buffer_pixels) {
+			v->dpp_output_buffer_lines_c = 0.5;
+		}
+		else {
+			v->dpp_output_buffer_lines_c = 1.0;
+		}
+		v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines);
+		v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+		v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark;
+		if (v->active_dp_ps > 1.0) {
+			v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+		}
+		if (v->byte_per_pixel_detc[k] > 0.0) {
+			v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines);
+			v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+			v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark;
+			if (v->active_dp_ps > 1.0) {
+				v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+			}
+			v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c);
+		}
+		else {
+			v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y;
+		}
+		if (v->output_format[k] == dcn_bw_444) {
+			v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark;
+		}
+		else {
+			v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark;
+		}
+		if (v->output[k] == dcn_bw_writeback) {
+			v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin);
+		}
+	}
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) {
+			v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark);
+		}
+		else {
+			v->v_blank_dram_clock_change_latency_margin[k] = 0.0;
+		}
+	}
+	v->min_active_dram_clock_change_margin = 999999.0;
+	v->v_blank_of_min_active_dram_clock_change_margin = 999999.0;
+	v->second_min_active_dram_clock_change_margin = 999999.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) {
+			v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+			v->min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+			v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+		}
+		else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) {
+			v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+		}
+	}
+	v->min_vblank_dram_clock_change_margin = 999999.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) {
+			v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+		}
+	}
+	if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+		v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin);
+	}
+	else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) {
+		v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin);
+	}
+	else {
+		v->dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+	}
+	if (v->min_active_dram_clock_change_margin > 0.0) {
+		v->dram_clock_change_support = dcn_bw_supported_in_v_active;
+	}
+	else if (v->dram_clock_change_margin > 0.0) {
+		v->dram_clock_change_support = dcn_bw_supported_in_v_blank;
+	}
+	else {
+		v->dram_clock_change_support = dcn_bw_not_supported;
+	}
+	/*maximum bandwidth used*/
+
+	v->wr_bandwidth = 0.0;
+	for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+		if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+			v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+		}
+		else if (v->output[k] == dcn_bw_writeback) {
+			v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+		}
+	}
+	v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth;
+}

+ 37 - 0
drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h

@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
#ifndef _DCN_CALC_AUTO_H_
#define _DCN_CALC_AUTO_H_

#include "dcn_calcs.h"

/*
 * Entry points of the DCN bandwidth/mode-support calculation pipeline.
 * Each stage reads and writes the shared dcn_bw_internal_vars scratch
 * state in place; no values are returned directly.
 */
void scaler_settings_calculation(struct dcn_bw_internal_vars *v);
void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v);
void display_pipe_configuration(struct dcn_bw_internal_vars *v);
/* Computes clocks, deep-sleep/prefetch parameters, watermarks and the
 * final performance numbers (e.g. max bandwidth used). */
void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(
		struct dcn_bw_internal_vars *v);

#endif /* _DCN_CALC_AUTO_H_ */

+ 120 - 0
drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c

@@ -0,0 +1,120 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn_calc_math.h"
+
/*
 * Truncated floating-point modulo: arg1 - arg2 * trunc(arg1 / arg2).
 * A NaN operand (which never compares equal to itself) is treated as
 * absent and the other argument is returned.
 */
float dcn_bw_mod(const float arg1, const float arg2)
{
	if (arg1 != arg1)
		return arg2;
	if (arg2 != arg2)
		return arg1;
	/* Fix: the original multiplied the quotient by arg1 instead of
	 * arg2, yielding nonsense (e.g. mod(7,3) == -7 instead of 1). */
	return arg1 - arg2 * ((int) (arg1 / arg2));
}
+
/*
 * Minimum of two floats.  A NaN operand (self-unequal) is treated as
 * absent: the other argument is returned unchanged.
 */
float dcn_bw_min2(const float arg1, const float arg2)
{
	const int arg1_is_nan = (arg1 != arg1);
	const int arg2_is_nan = (arg2 != arg2);

	if (arg1_is_nan)
		return arg2;
	if (arg2_is_nan)
		return arg1;
	return (arg2 < arg1) ? arg2 : arg1;
}
+
/*
 * Maximum of two unsigned integers.
 *
 * The original carried NaN self-comparison guards copied from the
 * float variants; `arg != arg` is always false for integers, so those
 * branches were dead code and have been removed.
 */
unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
{
	return arg1 > arg2 ? arg1 : arg2;
}
/*
 * Maximum of two floats.  A NaN operand (self-unequal) is treated as
 * absent: the other argument is returned unchanged.
 */
float dcn_bw_max2(const float arg1, const float arg2)
{
	const int arg1_is_nan = (arg1 != arg1);
	const int arg2_is_nan = (arg2 != arg2);

	if (arg1_is_nan)
		return arg2;
	if (arg2_is_nan)
		return arg1;
	return (arg2 > arg1) ? arg2 : arg1;
}
+
/*
 * Round arg down (truncate toward zero) to a multiple of significance.
 * A significance of 0 yields 0 rather than dividing by zero.
 */
float dcn_bw_floor2(const float arg, const float significance)
{
	int whole_steps;

	if (significance == 0)
		return 0;
	whole_steps = (int) (arg / significance);
	return whole_steps * significance;
}
+
/*
 * Round arg up to a multiple of significance.  If arg already sits on
 * (or within a tiny tolerance below) a multiple, arg itself is
 * returned — the 0.00001 slack absorbs floating point rounding noise.
 * A significance of 0 yields 0.
 */
float dcn_bw_ceil2(const float arg, const float significance)
{
	float below;

	if (significance == 0)
		return 0;
	below = dcn_bw_floor2(arg, significance);
	if (below + 0.00001 >= arg)
		return arg;
	return below + significance;
}
+
/*
 * Maximum of three floats.  NaN handling follows dcn_bw_max2().
 *
 * The original evaluated dcn_bw_max2(v1, v2) twice in a ternary; the
 * result is now computed once and reused.
 */
float dcn_bw_max3(float v1, float v2, float v3)
{
	float max12 = dcn_bw_max2(v1, v2);

	return v3 > max12 ? v3 : max12;
}
+
/*
 * Maximum of five floats.  NaN handling follows dcn_bw_max2()/max3().
 *
 * The original re-evaluated both sub-maxima inside a ternary; each is
 * now computed exactly once.
 */
float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5)
{
	float max123 = dcn_bw_max3(v1, v2, v3);
	float max45 = dcn_bw_max2(v4, v5);

	return max123 > max45 ? max123 : max45;
}
+
/*
 * Raise a to an integer power by recursive squaring.  The exponent is
 * truncated to int at every step (the disabled ASSERT documents that a
 * fractional exp is not really supported); negative exponents are
 * handled by dividing by a on the odd steps, i.e. a^-n == 1 / a^n.
 */
float dcn_bw_pow(float a, float exp)
{
	float temp;
	/*ASSERT(exp == (int)exp);*/
	if ((int)exp == 0)
		return 1;
	/* Recurse on exp/2; C integer division truncates toward zero, so
	 * this terminates for both positive and negative exponents. */
	temp = dcn_bw_pow(a, (int)(exp / 2));
	if (((int)exp % 2) == 0) {
		return temp * temp;
	} else {
		/* Odd exponent: apply one extra factor of a (or 1/a). */
		if ((int)exp > 0)
			return a * temp * temp;
		else
			return (temp * temp) / a;
	}
}
+
/*
 * Fast approximate logarithm of a in base b.
 *
 * Splits the IEEE-754 binary32 value into its exponent field (an
 * integer log2 contribution) and its mantissa, rebases the mantissa
 * into a small range via the bit manipulation below, then approximates
 * its log2 with a quadratic polynomial.  For bases other than
 * (approximately) 2 the result is converted with
 * log_b(a) = log2(a) / log2(b), where log2(b) recurses once with b==2.
 *
 * NOTE(review): the int-pointer cast type-puns the float's bits, which
 * is technically undefined under strict aliasing; this presumably
 * relies on the build using -fno-strict-aliasing — confirm.
 */
float dcn_bw_log(float a, float b)
{
	int * const exp_ptr = (int *)(&a);
	int x = *exp_ptr;
	/* Biased exponent field; the -128 bias pairs with the polynomial
	 * constants below rather than the standard -127. */
	const int log_2 = ((x >> 23) & 255) - 128;
	x &= ~(255 << 23);
	x += 127 << 23;
	*exp_ptr = x;

	/* Quadratic approximation of log2 of the rebiased mantissa. */
	a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;

	if (b > 2.00001 || b < 1.99999)
		return (a + log_2) / dcn_bw_log(b, 2);
	else
		return (a + log_2);
}

+ 40 - 0
drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h

@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
#ifndef _DCN_CALC_MATH_H_
#define _DCN_CALC_MATH_H_

/*
 * Small float helpers used by the DCN bandwidth formulas.  The
 * two-argument float variants treat a NaN operand as absent and return
 * the other argument.
 */
float dcn_bw_mod(const float arg1, const float arg2);
float dcn_bw_min2(const float arg1, const float arg2);
unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
float dcn_bw_max2(const float arg1, const float arg2);
/* Round arg down/up to a multiple of significance; significance == 0
 * yields 0. */
float dcn_bw_floor2(const float arg, const float significance);
float dcn_bw_ceil2(const float arg, const float significance);
float dcn_bw_max3(float v1, float v2, float v3);
float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
/* Power with the exponent truncated to an integer. */
float dcn_bw_pow(float a, float exp);
/* Fast approximate logarithm of a in base b. */
float dcn_bw_log(float a, float b);

#endif /* _DCN_CALC_MATH_H_ */

+ 1626 - 0
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c

@@ -0,0 +1,1626 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calcs.h"
+#include "dcn_calc_auto.h"
+#include "dc.h"
+#include "dal_asic_id.h"
+
+#include "resource.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn_calc_math.h"
+
+/* Defaults from spreadsheet rev#247 */
+/*
+ * SoC-level bounding box defaults for DCN1.0 (Raven): latencies,
+ * per-voltage-state clock ceilings, and fabric/DRAM bandwidths.  Used as
+ * fallbacks when the fused values are not sensible (see inline comment).
+ */
+const struct dcn_soc_bounding_box dcn10_soc_defaults = {
+		/* latencies */
+		.sr_exit_time = 17, /*us*/
+		.sr_enter_plus_exit_time = 19, /*us*/
+		.urgent_latency = 4, /*us*/
+		.dram_clock_change_latency = 17, /*us*/
+		.write_back_latency = 12, /*us*/
+		.percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/
+
+		/* below default clocks derived from STA target base on
+		 * slow-slow corner + 10% margin with voltages aligned to FCLK.
+		 *
+		 * Use these value if fused value doesn't make sense as earlier
+		 * part don't have correct value fused */
+		/* default DCF CLK DPM on RV*/
+		.dcfclkv_max0p9 = 655,	/* MHz, = 3600/5.5 */
+		.dcfclkv_nom0p8 = 626,	/* MHz, = 3600/5.75 */
+		.dcfclkv_mid0p72 = 600,	/* MHz, = 3600/6, bypass */
+		.dcfclkv_min0p65 = 300,	/* MHz, = 3600/12, bypass */
+
+		/* default DISP CLK voltage state on RV */
+		.max_dispclk_vmax0p9 = 1108,	/* MHz, = 3600/3.25 */
+		.max_dispclk_vnom0p8 = 1029,	/* MHz, = 3600/3.5 */
+		.max_dispclk_vmid0p72 = 960,	/* MHz, = 3600/3.75 */
+		.max_dispclk_vmin0p65 = 626,	/* MHz, = 3600/5.75 */
+
+		/* default DPP CLK voltage state on RV */
+		.max_dppclk_vmax0p9 = 720,	/* MHz, = 3600/5 */
+		.max_dppclk_vnom0p8 = 686,	/* MHz, = 3600/5.25 */
+		.max_dppclk_vmid0p72 = 626,	/* MHz, = 3600/5.75 */
+		.max_dppclk_vmin0p65 = 400,	/* MHz, = 3600/9 */
+
+		/* default PHY CLK voltage state on RV */
+		.phyclkv_max0p9 = 900, /*MHz*/
+		.phyclkv_nom0p8 = 847, /*MHz*/
+		.phyclkv_mid0p72 = 800, /*MHz*/
+		.phyclkv_min0p65 = 600, /*MHz*/
+
+		/* BW depend on FCLK, MCLK, # of channels */
+		/* dual channel BW */
+		.fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/
+		.fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/
+		.fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/
+		.fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/
+		/* single channel BW
+		.fabric_and_dram_bandwidth_vmax0p9 = 19.2f,
+		.fabric_and_dram_bandwidth_vnom0p8 = 17.066f,
+		.fabric_and_dram_bandwidth_vmid0p72 = 14.933f,
+		.fabric_and_dram_bandwidth_vmin0p65 = 12.8f,
+		*/
+
+		.number_of_channels = 2,
+
+		.socclk = 208, /*MHz*/
+		.downspreading = 0.5f, /*%*/
+		.round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/
+		.urgent_out_of_order_return_per_channel = 256, /*bytes*/
+		.vmm_page_size = 4096, /*bytes*/
+		.return_bus_width = 64, /*bytes*/
+		.max_request_size = 256, /*bytes*/
+
+		/* Depends on user class (client vs embedded, workstation, etc) */
+		.percent_disp_bw_limit = 0.3f /*%*/
+};
+
+/*
+ * IP-level capability defaults for the DCN1.0 display block: buffer
+ * sizes, scaler throughput/ratio/tap limits and pipe counts consumed by
+ * the bandwidth formulas.
+ */
+const struct dcn_ip_params dcn10_ip_defaults = {
+		.rob_buffer_size_in_kbyte = 64,
+		.det_buffer_size_in_kbyte = 164,
+		.dpp_output_buffer_pixels = 2560,
+		.opp_output_buffer_lines = 1,
+		.pixel_chunk_size_in_kbyte = 8,
+		.pte_enable = dcn_bw_yes,
+		.pte_chunk_size = 2, /*kbytes*/
+		.meta_chunk_size = 2, /*kbytes*/
+		.writeback_chunk_size = 2, /*kbytes*/
+		.odm_capability = dcn_bw_no,
+		.dsc_capability = dcn_bw_no,
+		.line_buffer_size = 589824, /*bit*/
+		.max_line_buffer_lines = 12,
+		.is_line_buffer_bpp_fixed = dcn_bw_no,
+		.line_buffer_fixed_bpp = dcn_bw_na,
+		.writeback_luma_buffer_size = 12, /*kbytes*/
+		.writeback_chroma_buffer_size = 8, /*kbytes*/
+		.max_num_dpp = 4,
+		.max_num_writeback = 2,
+		.max_dchub_topscl_throughput = 4, /*pixels/dppclk*/
+		.max_pscl_tolb_throughput = 2, /*pixels/dppclk*/
+		.max_lb_tovscl_throughput = 4, /*pixels/dppclk*/
+		.max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/
+		.max_hscl_ratio = 4,
+		.max_vscl_ratio = 4,
+		.max_hscl_taps = 8,
+		.max_vscl_taps = 8,
+		.pte_buffer_size_in_requests = 42,
+		.dispclk_ramping_margin = 1, /*%*/
+		.under_scan_factor = 1.11f,
+		.max_inter_dcn_tile_repeaters = 8,
+		.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no,
+		.bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no,
+		.dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/
+};
+
+/*
+ * Translate a DC swizzle mode into the dcn_bw_defs token used by the
+ * bandwidth formulas.  256B and rotated (_R) modes have no representation
+ * in the formula, so they trap to the debugger and fall back to 4KB_S.
+ */
+static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode)
+{
+	switch (sw_mode) {
+	case DC_SW_LINEAR:
+		return dcn_bw_sw_linear;
+	case DC_SW_4KB_S:
+		return dcn_bw_sw_4_kb_s;
+	case DC_SW_4KB_D:
+		return dcn_bw_sw_4_kb_d;
+	case DC_SW_64KB_S:
+		return dcn_bw_sw_64_kb_s;
+	case DC_SW_64KB_D:
+		return dcn_bw_sw_64_kb_d;
+	case DC_SW_VAR_S:
+		return dcn_bw_sw_var_s;
+	case DC_SW_VAR_D:
+		return dcn_bw_sw_var_d;
+	case DC_SW_64KB_S_T:
+		return dcn_bw_sw_64_kb_s_t;
+	case DC_SW_64KB_D_T:
+		return dcn_bw_sw_64_kb_d_t;
+	case DC_SW_4KB_S_X:
+		return dcn_bw_sw_4_kb_s_x;
+	case DC_SW_4KB_D_X:
+		return dcn_bw_sw_4_kb_d_x;
+	case DC_SW_64KB_S_X:
+		return dcn_bw_sw_64_kb_s_x;
+	case DC_SW_64KB_D_X:
+		return dcn_bw_sw_64_kb_d_x;
+	case DC_SW_VAR_S_X:
+		return dcn_bw_sw_var_s_x;
+	case DC_SW_VAR_D_X:
+		return dcn_bw_sw_var_d_x;
+	case DC_SW_256B_S:
+	case DC_SW_256_D:
+	case DC_SW_256_R:
+	case DC_SW_4KB_R:
+	case DC_SW_64KB_R:
+	case DC_SW_VAR_R:
+	case DC_SW_4KB_R_X:
+	case DC_SW_64KB_R_X:
+	case DC_SW_VAR_R_X:
+	default:
+		BREAK_TO_DEBUGGER(); /*not in formula*/
+		return dcn_bw_sw_4_kb_s;
+	}
+}
+
+/* Line-buffer pixel depth enum -> integer bits per pixel (default 30). */
+static int tl_lb_bpp_to_int(enum lb_pixel_depth depth)
+{
+	switch (depth) {
+	case LB_PIXEL_DEPTH_18BPP:
+		return 18;
+	case LB_PIXEL_DEPTH_24BPP:
+		return 24;
+	case LB_PIXEL_DEPTH_30BPP:
+		return 30;
+	case LB_PIXEL_DEPTH_36BPP:
+		return 36;
+	default:
+		return 30;
+	}
+}
+
+/*
+ * Map a surface pixel format to its dcn_bw_defs bandwidth class
+ * (sub-16/32/64 bpp RGB or 8/10-bit YUV420).  Unhandled formats are
+ * treated as 32bpp RGB.
+ */
+static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format)
+{
+	switch (format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		return dcn_bw_rgb_sub_16;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+		return dcn_bw_rgb_sub_32;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		return dcn_bw_rgb_sub_64;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		return dcn_bw_yuv420_sub_8;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+		return dcn_bw_yuv420_sub_10;
+	default:
+		return dcn_bw_rgb_sub_32;
+	}
+}
+
+/*
+ * Translate a dc pipe_ctx into the dml (_vcs_dpi) end-to-end pipe
+ * parameter struct consumed by the dml1 rq/dlg helpers: source surface
+ * layout (split/dcc/swizzle/format), scaler ratios and taps, and
+ * destination timing.  Assumes pipe->plane_state and pipe->stream are
+ * non-NULL (both are dereferenced unconditionally).
+ */
+static void pipe_ctx_to_e2e_pipe_params (
+		const struct pipe_ctx *pipe,
+		struct _vcs_dpi_display_pipe_params_st *input)
+{
+	/* horizontally split if this pipe shares its plane with the pipe
+	 * directly above or below it in the pipe chain */
+	input->src.is_hsplit = false;
+	if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state)
+		input->src.is_hsplit = true;
+	else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+		input->src.is_hsplit = true;
+
+	input->src.dcc                 = pipe->plane_state->dcc.enable;
+	input->src.dcc_rate            = 1;
+	input->src.meta_pitch          = pipe->plane_state->dcc.grph.meta_pitch;
+	input->src.source_scan         = dm_horz;
+	input->src.sw_mode             = pipe->plane_state->tiling_info.gfx9.swizzle;
+
+	/* NOTE(review): data_pitch/_c are taken from the viewport width, not
+	 * the surface pitch -- confirm this is the intended approximation. */
+	input->src.viewport_width      = pipe->plane_res.scl_data.viewport.width;
+	input->src.viewport_height     = pipe->plane_res.scl_data.viewport.height;
+	input->src.data_pitch          = pipe->plane_res.scl_data.viewport.width;
+	input->src.data_pitch_c        = pipe->plane_res.scl_data.viewport.width;
+	input->src.cur0_src_width      = 128; /* TODO: Cursor calcs, not curently stored */
+	input->src.cur0_bpp            = 32;
+
+	switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
+	/* for 4/8/16 high tiles */
+	case DC_SW_LINEAR:
+		input->src.is_display_sw = 1;
+		input->src.macro_tile_size = dm_4k_tile;
+		break;
+	case DC_SW_4KB_S:
+	case DC_SW_4KB_S_X:
+		input->src.is_display_sw = 0;
+		input->src.macro_tile_size = dm_4k_tile;
+		break;
+	case DC_SW_64KB_S:
+	case DC_SW_64KB_S_X:
+	case DC_SW_64KB_S_T:
+		input->src.is_display_sw = 0;
+		input->src.macro_tile_size = dm_64k_tile;
+		break;
+	case DC_SW_VAR_S:
+	case DC_SW_VAR_S_X:
+		input->src.is_display_sw = 0;
+		input->src.macro_tile_size = dm_256k_tile;
+		break;
+
+	/* For 64bpp 2 high tiles */
+	case DC_SW_4KB_D:
+	case DC_SW_4KB_D_X:
+		input->src.is_display_sw = 1;
+		input->src.macro_tile_size = dm_4k_tile;
+		break;
+	case DC_SW_64KB_D:
+	case DC_SW_64KB_D_X:
+	case DC_SW_64KB_D_T:
+		input->src.is_display_sw = 1;
+		input->src.macro_tile_size = dm_64k_tile;
+		break;
+	case DC_SW_VAR_D:
+	case DC_SW_VAR_D_X:
+		input->src.is_display_sw = 1;
+		input->src.macro_tile_size = dm_256k_tile;
+		break;
+
+	/* Unsupported swizzle modes for dcn */
+	case DC_SW_256B_S:
+	default:
+		ASSERT(0); /* Not supported */
+		break;
+	}
+
+	switch (pipe->plane_state->rotation) {
+	case ROTATION_ANGLE_0:
+	case ROTATION_ANGLE_180:
+		input->src.source_scan = dm_horz;
+		break;
+	case ROTATION_ANGLE_90:
+	case ROTATION_ANGLE_270:
+		input->src.source_scan = dm_vert;
+		break;
+	default:
+		ASSERT(0); /* Not supported */
+		break;
+	}
+
+	/* TODO: Fix pixel format mappings */
+	/* YUV420 chroma plane is half the luma viewport in both dimensions */
+	switch (pipe->plane_state->format) {
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		input->src.source_format = dm_420_8;
+		input->src.viewport_width_c    = input->src.viewport_width / 2;
+		input->src.viewport_height_c   = input->src.viewport_height / 2;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+		input->src.source_format = dm_420_10;
+		input->src.viewport_width_c    = input->src.viewport_width / 2;
+		input->src.viewport_height_c   = input->src.viewport_height / 2;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		input->src.source_format = dm_444_64;
+		input->src.viewport_width_c    = input->src.viewport_width;
+		input->src.viewport_height_c   = input->src.viewport_height;
+		break;
+	default:
+		input->src.source_format = dm_444_32;
+		input->src.viewport_width_c    = input->src.viewport_width;
+		input->src.viewport_height_c   = input->src.viewport_height;
+		break;
+	}
+
+	/* scaler ratios/inits are 32.32 fixed point: dividing .value by
+	 * 4294967296.0 (2^32) recovers the float value */
+	input->scale_taps.htaps                = pipe->plane_res.scl_data.taps.h_taps;
+	input->scale_ratio_depth.hscl_ratio    = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0;
+	input->scale_ratio_depth.vscl_ratio    = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0;
+	input->scale_ratio_depth.vinit =  pipe->plane_res.scl_data.inits.v.value/4294967296.0;
+	if (input->scale_ratio_depth.vinit < 1.0)
+			input->scale_ratio_depth.vinit = 1;
+	input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps;
+	input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c;
+	input->scale_taps.htaps_c              = pipe->plane_res.scl_data.taps.h_taps_c;
+	input->scale_ratio_depth.hscl_ratio_c  = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0;
+	input->scale_ratio_depth.vscl_ratio_c  = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0;
+	input->scale_ratio_depth.vinit_c       = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0;
+	if (input->scale_ratio_depth.vinit_c < 1.0)
+			input->scale_ratio_depth.vinit_c = 1;
+	switch (pipe->plane_res.scl_data.lb_params.depth) {
+	case LB_PIXEL_DEPTH_30BPP:
+		input->scale_ratio_depth.lb_depth = 30; break;
+	case LB_PIXEL_DEPTH_36BPP:
+		input->scale_ratio_depth.lb_depth = 36; break;
+	default:
+		input->scale_ratio_depth.lb_depth = 24; break;
+	}
+
+
+	input->dest.vactive        = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top
+			+ pipe->stream->timing.v_border_bottom;
+
+	input->dest.recout_width   = pipe->plane_res.scl_data.recout.width;
+	input->dest.recout_height  = pipe->plane_res.scl_data.recout.height;
+
+	input->dest.full_recout_width   = pipe->plane_res.scl_data.recout.width;
+	input->dest.full_recout_height  = pipe->plane_res.scl_data.recout.height;
+
+	/* blank boundaries derived from totals minus porches/active/borders */
+	input->dest.htotal         = pipe->stream->timing.h_total;
+	input->dest.hblank_start   = input->dest.htotal - pipe->stream->timing.h_front_porch;
+	input->dest.hblank_end     = input->dest.hblank_start
+			- pipe->stream->timing.h_addressable
+			- pipe->stream->timing.h_border_left
+			- pipe->stream->timing.h_border_right;
+
+	input->dest.vtotal         = pipe->stream->timing.v_total;
+	input->dest.vblank_start   = input->dest.vtotal - pipe->stream->timing.v_front_porch;
+	input->dest.vblank_end     = input->dest.vblank_start
+			- pipe->stream->timing.v_addressable
+			- pipe->stream->timing.v_border_bottom
+			- pipe->stream->timing.v_border_top;
+	input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_khz/1000.0;
+	input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start;
+	input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
+	/* NOTE(review): the line below duplicates the assignment above --
+	 * harmless, but one of the two should be removed upstream */
+	input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
+	input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width;
+
+}
+
+/*
+ * Compute the RQ/DLG/TTU register values for one pipe from the results of
+ * the bandwidth calculation (v), by packaging clocks, watermarks and pipe
+ * geometry into the dml1 helper's parameter structs and storing the
+ * resulting registers in pipe->rq_regs/dlg_regs/ttu_regs.  Uses float
+ * math throughout; presumably runs inside the caller's
+ * kernel_fpu_begin()/end() section -- confirm at call sites.
+ */
+static void dcn_bw_calc_rq_dlg_ttu(
+		const struct dc *dc,
+		const struct dcn_bw_internal_vars *v,
+		struct pipe_ctx *pipe,
+		int in_idx)
+{
+	struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml);
+	struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
+	struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
+	struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
+	struct _vcs_dpi_display_rq_params_st rq_param = {0};
+	struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
+	struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+	float total_active_bw = 0;
+	float total_prefetch_bw = 0;
+	int total_flip_bytes = 0;
+	int i;
+
+	/* flip bandwidth is whatever return bandwidth is left over after the
+	 * larger of active vs prefetch demand, clamped at zero */
+	for (i = 0; i < number_of_planes; i++) {
+		total_active_bw += v->read_bandwidth[i];
+		total_prefetch_bw += v->prefetch_bandwidth[i];
+		total_flip_bytes += v->total_immediate_flip_bytes[i];
+	}
+	dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+	if (dlg_sys_param.total_flip_bw < 0.0)
+		dlg_sys_param.total_flip_bw = 0;
+
+	dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
+	dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+	dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
+	dlg_sys_param.t_extra_us = v->urgent_extra_latency;
+	dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+	dlg_sys_param.total_flip_bytes = total_flip_bytes;
+
+	pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
+	input.clks_cfg.dcfclk_mhz = v->dcfclk;
+	input.clks_cfg.dispclk_mhz = v->dispclk;
+	input.clks_cfg.dppclk_mhz = v->dppclk;
+	input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+	input.clks_cfg.socclk_mhz = v->socclk;
+	input.clks_cfg.voltage = v->voltage_level;
+//	dc->dml.logger = pool->base.logger;
+	input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+	input.dout.output_type  = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+	//input[in_idx].dout.output_standard;
+	switch (v->output_deep_color[in_idx]) {
+	case dcn_bw_encoder_12bpc:
+		input.dout.output_bpc = dm_out_12;
+	break;
+	case dcn_bw_encoder_10bpc:
+		input.dout.output_bpc = dm_out_10;
+	break;
+	case dcn_bw_encoder_8bpc:
+	default:
+		input.dout.output_bpc = dm_out_8;
+	break;
+	}
+
+	/*todo: soc->sr_enter_plus_exit_time??*/
+	dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+
+	dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+	dml1_extract_rq_regs(dml, rq_regs, rq_param);
+	dml1_rq_dlg_get_dlg_params(
+			dml,
+			dlg_regs,
+			ttu_regs,
+			rq_param.dlg,
+			dlg_sys_param,
+			input,
+			true,
+			true,
+			v->pte_enable == dcn_bw_yes,
+			pipe->plane_state->flip_immediate);
+}
+
+/*
+ * Clone the primary pipe's context into secondary_pipe (keeping only the
+ * secondary's own pipe_idx and taking its per-pipe HW resources from the
+ * pool), splice it into the top/bottom pipe chain directly below the
+ * primary, and rebuild scaling parameters for both halves.  No-op if the
+ * primary has no plane_state.
+ */
+static void split_stream_across_pipes(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct pipe_ctx *primary_pipe,
+		struct pipe_ctx *secondary_pipe)
+{
+	int pipe_idx = secondary_pipe->pipe_idx;
+
+	if (!primary_pipe->plane_state)
+		return;
+
+	/* struct copy clobbers everything, so re-seat the secondary's own
+	 * identity and per-pipe hardware blocks afterwards */
+	*secondary_pipe = *primary_pipe;
+
+	secondary_pipe->pipe_idx = pipe_idx;
+	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
+	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
+	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
+	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
+	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
+	if (primary_pipe->bottom_pipe) {
+		ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
+		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
+		secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
+	}
+	primary_pipe->bottom_pipe = secondary_pipe;
+	secondary_pipe->top_pipe = primary_pipe;
+
+	resource_build_scaling_params(primary_pipe);
+	resource_build_scaling_params(secondary_pipe);
+}
+
+/*
+ * Fill context->bw.dcn.watermarks sets A-D by re-running the
+ * dispclk/dppclk/dcfclk watermark calculation at progressively higher
+ * clock/bandwidth states: set B at nominal fabric bandwidth, set C at
+ * nominal DCFCLK, set D at max (each only when the required voltage_level
+ * is below that state), and finally set A at the actually required state.
+ * Levels at or above the required voltage reuse set A.
+ */
+static void calc_wm_sets_and_perf_params(
+		struct dc_state *context,
+		struct dcn_bw_internal_vars *v)
+{
+	/* Calculate set A last to keep internal var state consistent for required config */
+	if (v->voltage_level < 2) {
+		v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8;
+		v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8;
+		v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
+		dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+		/* watermarks are produced in us; stored in ns */
+		context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+			v->stutter_exit_watermark * 1000;
+		context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+				v->stutter_enter_plus_exit_watermark * 1000;
+		context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+				v->dram_clock_change_watermark * 1000;
+		context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+		context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+
+		v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
+		v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
+		v->dcfclk = v->dcfclkv_nom0p8;
+		dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+		context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+			v->stutter_exit_watermark * 1000;
+		context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+				v->stutter_enter_plus_exit_watermark * 1000;
+		context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+				v->dram_clock_change_watermark * 1000;
+		context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+		context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+	}
+
+	if (v->voltage_level < 3) {
+		v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9;
+		v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9;
+		v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9;
+		v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9;
+		v->dcfclk_per_state[2] = v->dcfclkv_max0p9;
+		v->dcfclk_per_state[1] = v->dcfclkv_max0p9;
+		v->dcfclk_per_state[0] = v->dcfclkv_max0p9;
+		v->dcfclk = v->dcfclkv_max0p9;
+		dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+		context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+			v->stutter_exit_watermark * 1000;
+		context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+				v->stutter_enter_plus_exit_watermark * 1000;
+		context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+				v->dram_clock_change_watermark * 1000;
+		context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+		context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+	}
+
+	/* restore per-state tables to their real values and compute set A at
+	 * the required voltage level */
+	v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+	v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+	v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+	v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+	v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+	v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+	v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+	v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+	dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+	context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+		v->stutter_exit_watermark * 1000;
+	context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+			v->stutter_enter_plus_exit_watermark * 1000;
+	context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+			v->dram_clock_change_watermark * 1000;
+	context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+	context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+	if (v->voltage_level >= 2) {
+		context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+		context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+	}
+	if (v->voltage_level >= 3)
+		context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+}
+
+/*
+ * Fold debug/registry overrides (dc->debug.*) into the SoC bounding box.
+ * Returns true if any value actually changed, so the caller can re-sync
+ * the calcs/DML state.  Each override applies only when non-zero and
+ * different from the current value (compared after *1000 scaling to
+ * match the ns-granularity debug fields).  Float math is bracketed by
+ * kernel_fpu_begin()/kernel_fpu_end().
+ */
+static bool dcn_bw_apply_registry_override(struct dc *dc)
+{
+	bool updated = false;
+
+	kernel_fpu_begin();
+	if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
+			&& dc->debug.sr_exit_time_ns) {
+		updated = true;
+		dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0;
+	}
+
+	if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000)
+				!= dc->debug.sr_enter_plus_exit_time_ns
+			&& dc->debug.sr_enter_plus_exit_time_ns) {
+		updated = true;
+		dc->dcn_soc->sr_enter_plus_exit_time =
+				dc->debug.sr_enter_plus_exit_time_ns / 1000.0;
+	}
+
+	if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns
+			&& dc->debug.urgent_latency_ns) {
+		updated = true;
+		dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0;
+	}
+
+	/* NOTE(review): percent_of_ideal_drambw is compared against the
+	 * *1000-scaled current value but assigned unscaled -- confirm the
+	 * units of the debug field (percent vs milli-percent) upstream */
+	if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000)
+				!= dc->debug.percent_of_ideal_drambw
+			&& dc->debug.percent_of_ideal_drambw) {
+		updated = true;
+		dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency =
+				dc->debug.percent_of_ideal_drambw;
+	}
+
+	if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000)
+				!= dc->debug.dram_clock_change_latency_ns
+			&& dc->debug.dram_clock_change_latency_ns) {
+		updated = true;
+		dc->dcn_soc->dram_clock_change_latency =
+				dc->debug.dram_clock_change_latency_ns / 1000.0;
+	}
+	kernel_fpu_end();
+
+	return updated;
+}
+
+/*
+ * Disable optional pipe split by lowering the DPM0 dispclk bound.
+ * NOTE(review): the dispclk bound is set from max_dppclk_vmin0p65 rather
+ * than max_dispclk_vmin0p65 -- likely intentional (clamp dispclk to the
+ * DPP minimum so split is never advantageous), but worth confirming.
+ */
+void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
+{
+	/*
+	 * disable optional pipe split by lower dispclk bounding box
+	 * at DPM0
+	 */
+	v->max_dispclk[0] = v->max_dppclk_vmin0p65;
+}
+
+/*
+ * Force pipe split by capping the DPM0 DPP clock at (just below) the
+ * stream pixel rate, so the bandwidth calc has to split the pipe to fit.
+ * NOTE(review): pixel_rate_khz / 1000 is integer division, so fractional
+ * MHz is truncated before the float assignment -- confirm this rounding
+ * direction is intended.
+ */
+void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
+		unsigned int pixel_rate_khz)
+{
+	float pixel_rate_mhz = pixel_rate_khz / 1000;
+
+	/*
+	 * force enabling pipe split by lower dpp clock for DPM0 to just
+	 * below the specify pixel_rate, so bw calc would split pipe.
+	 */
+	if (pixel_rate_mhz < v->max_dppclk[0])
+		v->max_dppclk[0] = pixel_rate_mhz;
+}
+
+/*
+ * Apply debug pipe-split policy hacks to the bounding box before
+ * validation: disable optional split per MPC_SPLIT_AVOID policies, or
+ * force a split for a single display when force_single_disp_pipe_split
+ * is set.
+ */
+void hack_bounding_box(struct dcn_bw_internal_vars *v,
+		struct dc_debug *dbg,
+		struct dc_state *context)
+{
+	if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) {
+		hack_disable_optional_pipe_split(v);
+	}
+
+	if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
+		context->stream_count >= 2) {
+		hack_disable_optional_pipe_split(v);
+	}
+
+	if (context->stream_count == 1 &&
+			dbg->force_single_disp_pipe_split) {
+		struct dc_stream_state *stream0 = context->streams[0];
+
+		hack_force_pipe_split(v, stream0->timing.pix_clk_khz);
+	}
+}
+
+bool dcn_validate_bandwidth(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	const struct resource_pool *pool = dc->res_pool;
+	struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
+	int i, input_idx;
+	int vesa_sync_start, asic_blank_end, asic_blank_start;
+	bool bw_limit_pass;
+	float bw_limit;
+
+	PERFORMANCE_TRACE_START();
+	if (dcn_bw_apply_registry_override(dc))
+		dcn_bw_sync_calcs_and_dml(dc);
+
+	memset(v, 0, sizeof(*v));
+	kernel_fpu_begin();
+	v->sr_exit_time = dc->dcn_soc->sr_exit_time;
+	v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
+	v->urgent_latency = dc->dcn_soc->urgent_latency;
+	v->write_back_latency = dc->dcn_soc->write_back_latency;
+	v->percent_of_ideal_drambw_received_after_urg_latency =
+			dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+
+	v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65;
+	v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72;
+	v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8;
+	v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9;
+
+	v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65;
+	v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72;
+	v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8;
+	v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9;
+
+	v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65;
+	v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72;
+	v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8;
+	v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9;
+
+	v->socclk = dc->dcn_soc->socclk;
+
+	v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+	v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+	v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+	v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+	v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65;
+	v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72;
+	v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8;
+	v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9;
+
+	v->downspreading = dc->dcn_soc->downspreading;
+	v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles;
+	v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel;
+	v->number_of_channels = dc->dcn_soc->number_of_channels;
+	v->vmm_page_size = dc->dcn_soc->vmm_page_size;
+	v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency;
+	v->return_bus_width = dc->dcn_soc->return_bus_width;
+
+	v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte;
+	v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte;
+	v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+	v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+	v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+	v->pte_enable = dc->dcn_ip->pte_enable;
+	v->pte_chunk_size = dc->dcn_ip->pte_chunk_size;
+	v->meta_chunk_size = dc->dcn_ip->meta_chunk_size;
+	v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size;
+	v->odm_capability = dc->dcn_ip->odm_capability;
+	v->dsc_capability = dc->dcn_ip->dsc_capability;
+	v->line_buffer_size = dc->dcn_ip->line_buffer_size;
+	v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed;
+	v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp;
+	v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+	v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size;
+	v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size;
+	v->max_num_dpp = dc->dcn_ip->max_num_dpp;
+	v->max_num_writeback = dc->dcn_ip->max_num_writeback;
+	v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput;
+	v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput;
+	v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput;
+	v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput;
+	v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+	v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+	v->max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+	v->max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+	v->under_scan_factor = dc->dcn_ip->under_scan_factor;
+	v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests;
+	v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin;
+	v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+	v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+			dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+	v->bug_forcing_luma_and_chroma_request_to_same_size_fixed =
+			dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+
+	v->voltage[5] = dcn_bw_no_support;
+	v->voltage[4] = dcn_bw_v_max0p9;
+	v->voltage[3] = dcn_bw_v_max0p9;
+	v->voltage[2] = dcn_bw_v_nom0p8;
+	v->voltage[1] = dcn_bw_v_mid0p72;
+	v->voltage[0] = dcn_bw_v_min0p65;
+	v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9;
+	v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9;
+	v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9;
+	v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+	v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+	v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+	v->dcfclk_per_state[5] = v->dcfclkv_max0p9;
+	v->dcfclk_per_state[4] = v->dcfclkv_max0p9;
+	v->dcfclk_per_state[3] = v->dcfclkv_max0p9;
+	v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+	v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+	v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+	v->max_dispclk[5] = v->max_dispclk_vmax0p9;
+	v->max_dispclk[4] = v->max_dispclk_vmax0p9;
+	v->max_dispclk[3] = v->max_dispclk_vmax0p9;
+	v->max_dispclk[2] = v->max_dispclk_vnom0p8;
+	v->max_dispclk[1] = v->max_dispclk_vmid0p72;
+	v->max_dispclk[0] = v->max_dispclk_vmin0p65;
+	v->max_dppclk[5] = v->max_dppclk_vmax0p9;
+	v->max_dppclk[4] = v->max_dppclk_vmax0p9;
+	v->max_dppclk[3] = v->max_dppclk_vmax0p9;
+	v->max_dppclk[2] = v->max_dppclk_vnom0p8;
+	v->max_dppclk[1] = v->max_dppclk_vmid0p72;
+	v->max_dppclk[0] = v->max_dppclk_vmin0p65;
+	v->phyclk_per_state[5] = v->phyclkv_max0p9;
+	v->phyclk_per_state[4] = v->phyclkv_max0p9;
+	v->phyclk_per_state[3] = v->phyclkv_max0p9;
+	v->phyclk_per_state[2] = v->phyclkv_nom0p8;
+	v->phyclk_per_state[1] = v->phyclkv_mid0p72;
+	v->phyclk_per_state[0] = v->phyclkv_min0p65;
+
+	hack_bounding_box(v, &dc->debug, context);
+
+	if (v->voltage_override == dcn_bw_v_max0p9) {
+		v->voltage_override_level = number_of_states - 1;
+	} else if (v->voltage_override == dcn_bw_v_nom0p8) {
+		v->voltage_override_level = number_of_states - 2;
+	} else if (v->voltage_override == dcn_bw_v_mid0p72) {
+		v->voltage_override_level = number_of_states - 3;
+	} else {
+		v->voltage_override_level = 0;
+	}
+	v->synchronized_vblank = dcn_bw_no;
+	v->ta_pscalculation = dcn_bw_override;
+	v->allow_different_hratio_vratio = dcn_bw_yes;
+
+
+	for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe->stream)
+			continue;
+		/* skip all but first of split pipes */
+		if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+			continue;
+
+		v->underscan_output[input_idx] = false; /* taken care of in recout already*/
+		v->interlace_output[input_idx] = false;
+
+		v->htotal[input_idx] = pipe->stream->timing.h_total;
+		v->vtotal[input_idx] = pipe->stream->timing.v_total;
+		v->vactive[input_idx] = pipe->stream->timing.v_addressable +
+				pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom;
+		v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
+				- v->vactive[input_idx]
+				- pipe->stream->timing.v_front_porch;
+		v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz / 1000.0f;
+
+		if (!pipe->plane_state) {
+			v->dcc_enable[input_idx] = dcn_bw_yes;
+			v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32;
+			v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s;
+			v->lb_bit_per_pixel[input_idx] = 30;
+			v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
+			v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
+			v->scaler_rec_out_width[input_idx] = pipe->stream->timing.h_addressable;
+			v->scaler_recout_height[input_idx] = pipe->stream->timing.v_addressable;
+			v->override_hta_ps[input_idx] = 1;
+			v->override_vta_ps[input_idx] = 1;
+			v->override_hta_pschroma[input_idx] = 1;
+			v->override_vta_pschroma[input_idx] = 1;
+			v->source_scan[input_idx] = dcn_bw_hor;
+
+		} else {
+			v->viewport_height[input_idx] =  pipe->plane_res.scl_data.viewport.height;
+			v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width;
+			v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width;
+			v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height;
+			if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) {
+				if (pipe->plane_state->rotation % 2 == 0) {
+					int viewport_end = pipe->plane_res.scl_data.viewport.width
+							+ pipe->plane_res.scl_data.viewport.x;
+					int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width
+							+ pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+
+					if (viewport_end > viewport_b_end)
+						v->viewport_width[input_idx] = viewport_end
+							- pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+					else
+						v->viewport_width[input_idx] = viewport_b_end
+									- pipe->plane_res.scl_data.viewport.x;
+				} else  {
+					int viewport_end = pipe->plane_res.scl_data.viewport.height
+						+ pipe->plane_res.scl_data.viewport.y;
+					int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height
+						+ pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+
+					if (viewport_end > viewport_b_end)
+						v->viewport_height[input_idx] = viewport_end
+							- pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+					else
+						v->viewport_height[input_idx] = viewport_b_end
+									- pipe->plane_res.scl_data.viewport.y;
+				}
+				v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width
+						+ pipe->bottom_pipe->plane_res.scl_data.recout.width;
+			}
+
+			v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+			v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
+					pipe->plane_state->format);
+			v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
+					pipe->plane_state->tiling_info.gfx9.swizzle);
+			v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth);
+			v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps;
+			v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
+			v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
+			v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+			v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
+		}
+		if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
+			v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp;
+		v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/
+		v->output_format[input_idx] = pipe->stream->timing.pixel_encoding ==
+				PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444;
+		v->output[input_idx] = pipe->stream->sink->sink_signal ==
+				SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp;
+		v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc;
+		if (v->output[input_idx] == dcn_bw_hdmi) {
+			switch (pipe->stream->timing.display_color_depth) {
+			case COLOR_DEPTH_101010:
+				v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc;
+				break;
+			case COLOR_DEPTH_121212:
+				v->output_deep_color[input_idx]  = dcn_bw_encoder_12bpc;
+				break;
+			case COLOR_DEPTH_161616:
+				v->output_deep_color[input_idx]  = dcn_bw_encoder_16bpc;
+				break;
+			default:
+				break;
+			}
+		}
+
+		input_idx++;
+	}
+	v->number_of_active_planes = input_idx;
+
+	scaler_settings_calculation(v);
+	mode_support_and_system_configuration(v);
+
+	if (v->voltage_level == 0 &&
+			(dc->debug.sr_exit_time_dpm0_ns
+				|| dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
+
+		if (dc->debug.sr_enter_plus_exit_time_dpm0_ns)
+			v->sr_enter_plus_exit_time =
+				dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
+		if (dc->debug.sr_exit_time_dpm0_ns)
+			v->sr_exit_time =  dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
+		dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+		dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+		mode_support_and_system_configuration(v);
+	}
+
+	if (v->voltage_level != 5) {
+		float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+		if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
+			bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
+		else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
+			bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72;
+		else if (bw_consumed < v->fabric_and_dram_bandwidth_vnom0p8)
+			bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8;
+		else
+			bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9;
+
+		if (bw_consumed < v->fabric_and_dram_bandwidth)
+			if (dc->debug.voltage_align_fclk)
+				bw_consumed = v->fabric_and_dram_bandwidth;
+
+		display_pipe_configuration(v);
+		calc_wm_sets_and_perf_params(context, v);
+		context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+				(ddr4_dram_factor_single_Channel * v->number_of_channels));
+		if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
+			context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+		}
+
+		context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
+		context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
+		context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+		context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+
+		context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+		if (dc->debug.max_disp_clk == true)
+			context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+
+		if (context->bw.dcn.calc_clk.dispclk_khz <
+				dc->debug.min_disp_clk_khz) {
+			context->bw.dcn.calc_clk.dispclk_khz =
+					dc->debug.min_disp_clk_khz;
+		}
+
+		context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2;
+
+		for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+			/* skip inactive pipe */
+			if (!pipe->stream)
+				continue;
+			/* skip all but first of split pipes */
+			if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+				continue;
+
+			pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+			pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+			pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+			pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+			pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+			pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+			vesa_sync_start = pipe->stream->timing.v_addressable +
+						pipe->stream->timing.v_border_bottom +
+						pipe->stream->timing.v_front_porch;
+
+			asic_blank_end = (pipe->stream->timing.v_total -
+						vesa_sync_start -
+						pipe->stream->timing.v_border_top)
+			* (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+			asic_blank_start = asic_blank_end +
+						(pipe->stream->timing.v_border_top +
+						pipe->stream->timing.v_addressable +
+						pipe->stream->timing.v_border_bottom)
+			* (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+			pipe->pipe_dlg_param.vblank_start = asic_blank_start;
+			pipe->pipe_dlg_param.vblank_end = asic_blank_end;
+
+			if (pipe->plane_state) {
+				struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+
+				if (v->dpp_per_plane[input_idx] == 2 ||
+					((pipe->stream->view_format ==
+					  VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+					  pipe->stream->view_format ==
+					  VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+					(pipe->stream->timing.timing_3d_format ==
+					 TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+					 pipe->stream->timing.timing_3d_format ==
+					 TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
+					if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+						/* update previously split pipe */
+						hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+						hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+						hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+						hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+						hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+						hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+						hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start;
+						hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
+					} else {
+						/* pipe not split previously needs split */
+						hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+						ASSERT(hsplit_pipe);
+						split_stream_across_pipes(
+							&context->res_ctx, pool,
+							pipe, hsplit_pipe);
+					}
+
+					dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx);
+				} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+					/* merge previously split pipe */
+					pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
+					if (hsplit_pipe->bottom_pipe)
+						hsplit_pipe->bottom_pipe->top_pipe = pipe;
+					hsplit_pipe->plane_state = NULL;
+					hsplit_pipe->stream = NULL;
+					hsplit_pipe->top_pipe = NULL;
+					hsplit_pipe->bottom_pipe = NULL;
+					resource_build_scaling_params(pipe);
+				}
+				/* for now important to do this after pipe split for building e2e params */
+				dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx);
+			}
+
+			input_idx++;
+		}
+	}
+
+	if (v->voltage_level == 0) {
+
+		dc->dml.soc.sr_enter_plus_exit_time_us =
+				dc->dcn_soc->sr_enter_plus_exit_time;
+		dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+	}
+
+	/*
+	 * BW limit is set to prevent display from impacting other system functions
+	 */
+
+	bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
+	bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
+
+	kernel_fpu_end();
+
+	PERFORMANCE_TRACE_END();
+
+	if (bw_limit_pass && v->voltage_level != 5)
+		return true;
+	else
+		return false;
+}
+
+static unsigned int dcn_find_normalized_clock_vdd_Level(
+	const struct dc *dc,
+	enum dm_pp_clock_type clocks_type,
+	int clocks_in_khz)
+{
+	int vdd_level = dcn_bw_v_min0p65;
+
+	if (clocks_in_khz == 0)/*todo some clock not in the considerations*/
+		return vdd_level;
+
+	switch (clocks_type) {
+	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+		if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+			vdd_level = dcn_bw_v_max0p91;
+			BREAK_TO_DEBUGGER();
+		} else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+			vdd_level = dcn_bw_v_max0p9;
+		} else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+			vdd_level = dcn_bw_v_nom0p8;
+		} else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
+			vdd_level = dcn_bw_v_mid0p72;
+		} else
+			vdd_level = dcn_bw_v_min0p65;
+		break;
+	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+		if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+			vdd_level = dcn_bw_v_max0p91;
+			BREAK_TO_DEBUGGER();
+		} else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+			vdd_level = dcn_bw_v_max0p9;
+		} else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+			vdd_level = dcn_bw_v_nom0p8;
+		} else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
+			vdd_level = dcn_bw_v_mid0p72;
+		} else
+			vdd_level = dcn_bw_v_min0p65;
+		break;
+
+	case DM_PP_CLOCK_TYPE_DPPCLK:
+		if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+			vdd_level = dcn_bw_v_max0p91;
+			BREAK_TO_DEBUGGER();
+		} else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+			vdd_level = dcn_bw_v_max0p9;
+		} else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+			vdd_level = dcn_bw_v_nom0p8;
+		} else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
+			vdd_level = dcn_bw_v_mid0p72;
+		} else
+			vdd_level = dcn_bw_v_min0p65;
+		break;
+
+	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+		{
+			unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+			if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+				vdd_level = dcn_bw_v_max0p91;
+				BREAK_TO_DEBUGGER();
+			} else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+				vdd_level = dcn_bw_v_max0p9;
+			} else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+				vdd_level = dcn_bw_v_nom0p8;
+			} else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
+				vdd_level = dcn_bw_v_mid0p72;
+			} else
+				vdd_level = dcn_bw_v_min0p65;
+		}
+		break;
+
+	case DM_PP_CLOCK_TYPE_DCFCLK:
+		if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+			vdd_level = dcn_bw_v_max0p91;
+			BREAK_TO_DEBUGGER();
+		} else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+			vdd_level = dcn_bw_v_max0p9;
+		} else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+			vdd_level = dcn_bw_v_nom0p8;
+		} else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
+			vdd_level = dcn_bw_v_mid0p72;
+		} else
+			vdd_level = dcn_bw_v_min0p65;
+		break;
+
+	default:
+		 break;
+	}
+	return vdd_level;
+}
+
+unsigned int dcn_find_dcfclk_suits_all(
+	const struct dc *dc,
+	struct clocks_value *clocks)
+{
+	unsigned vdd_level, vdd_level_temp;
+	unsigned dcf_clk;
+
+	/*find a common supported voltage level*/
+	vdd_level = dcn_find_normalized_clock_vdd_Level(
+		dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+	vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+		dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+
+	vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+	vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+		dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+	vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+
+	vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+		dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+	vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+	vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+		dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+
+	/*find that level conresponding dcfclk*/
+	vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+	if (vdd_level == dcn_bw_v_max0p91) {
+		BREAK_TO_DEBUGGER();
+		dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+	} else if (vdd_level == dcn_bw_v_max0p9)
+		dcf_clk =  dc->dcn_soc->dcfclkv_max0p9*1000;
+	else if (vdd_level == dcn_bw_v_nom0p8)
+		dcf_clk =  dc->dcn_soc->dcfclkv_nom0p8*1000;
+	else if (vdd_level == dcn_bw_v_mid0p72)
+		dcf_clk =  dc->dcn_soc->dcfclkv_mid0p72*1000;
+	else
+		dcf_clk =  dc->dcn_soc->dcfclkv_min0p65*1000;
+
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+		"\tdcf_clk for voltage = %d\n", dcf_clk);
+	return dcf_clk;
+}
+
+void dcn_bw_update_from_pplib(struct dc *dc)
+{
+	struct dc_context *ctx = dc->ctx;
+	struct dm_pp_clock_levels_with_voltage clks = {0};
+
+	kernel_fpu_begin();
+
+	/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+
+	if (dm_pp_get_clock_levels_by_type_with_voltage(
+			ctx, DM_PP_CLOCK_TYPE_FCLK, &clks) &&
+			clks.num_levels != 0) {
+		ASSERT(clks.num_levels >= 3);
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (clks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+		if (clks.num_levels > 2) {
+			dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+					(clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+		} else {
+			dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+					(clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+		}
+		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
+				(clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
+				(clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+	} else
+		BREAK_TO_DEBUGGER();
+	if (dm_pp_get_clock_levels_by_type_with_voltage(
+				ctx, DM_PP_CLOCK_TYPE_DCFCLK, &clks) &&
+				clks.num_levels >= 3) {
+		dc->dcn_soc->dcfclkv_min0p65 = clks.data[0].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_mid0p72 = clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_nom0p8 = clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_max0p9 = clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0;
+	} else
+		BREAK_TO_DEBUGGER();
+
+	kernel_fpu_end();
+}
+
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+{
+	struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+	struct pp_smu_wm_range_sets ranges = {0};
+	int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
+	int max_dcfclk_khz, min_dcfclk_khz;
+	int socclk_khz;
+	const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
+	unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+	if (!pp->set_wm_ranges)
+		return;
+
+	kernel_fpu_begin();
+	max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
+	nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
+	mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
+	min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+	max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
+	min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+	socclk_khz = dc->dcn_soc->socclk * 1000;
+	kernel_fpu_end();
+
+	/* Now notify PPLib/SMU about which Watermarks sets they should select
+	 * depending on DPM state they are in. And update BW MGR GFX Engine and
+	 * Memory clock member variables for Watermarks calculations for each
+	 * Watermark Set
+	 */
+	/* SOCCLK does not affect anytihng but writeback for DCN so for now we dont
+	 * care what the value is, hence min to overdrive level
+	 */
+	ranges.num_reader_wm_sets = WM_COUNT;
+	ranges.num_writer_wm_sets = WM_COUNT;
+	ranges.reader_wm_sets[0].wm_inst = WM_A;
+	ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
+	ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+	ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
+	ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+	ranges.writer_wm_sets[0].wm_inst = WM_A;
+	ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
+	ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
+	ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
+	ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
+
+	ranges.reader_wm_sets[1].wm_inst = WM_B;
+	ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
+	ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
+	ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
+	ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
+	ranges.writer_wm_sets[1].wm_inst = WM_B;
+	ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
+	ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
+	ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
+	ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
+
+
+	ranges.reader_wm_sets[2].wm_inst = WM_C;
+	ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
+	ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
+	ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
+	ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
+	ranges.writer_wm_sets[2].wm_inst = WM_C;
+	ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
+	ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
+	ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
+	ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
+
+	ranges.reader_wm_sets[3].wm_inst = WM_D;
+	ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
+	ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
+	ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
+	ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
+	ranges.writer_wm_sets[3].wm_inst = WM_D;
+	ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
+	ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
+	ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
+	ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+
+	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+		ranges.reader_wm_sets[0].wm_inst = WM_A;
+		ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
+		ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+		ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
+		ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+		ranges.writer_wm_sets[0].wm_inst = WM_A;
+		ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
+		ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+		ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
+		ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
+
+		ranges.reader_wm_sets[1].wm_inst = WM_B;
+		ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
+		ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
+		ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
+		ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
+		ranges.writer_wm_sets[1].wm_inst = WM_B;
+		ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
+		ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
+		ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
+		ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
+
+
+		ranges.reader_wm_sets[2].wm_inst = WM_C;
+		ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
+		ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
+		ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
+		ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
+		ranges.writer_wm_sets[2].wm_inst = WM_C;
+		ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
+		ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
+		ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
+		ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
+
+		ranges.reader_wm_sets[3].wm_inst = WM_D;
+		ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
+		ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
+		ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
+		ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
+		ranges.writer_wm_sets[3].wm_inst = WM_D;
+		ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
+		ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
+		ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
+		ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+	}
+
+	/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+	pp->set_wm_ranges(&pp->pp_smu, &ranges);
+}
+
+void dcn_bw_sync_calcs_and_dml(struct dc *dc)
+{
+	kernel_fpu_begin();
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"sr_exit_time: %d ns\n"
+			"sr_enter_plus_exit_time: %d ns\n"
+			"urgent_latency: %d ns\n"
+			"write_back_latency: %d ns\n"
+			"percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+			"max_request_size: %d bytes\n"
+			"dcfclkv_max0p9: %d kHz\n"
+			"dcfclkv_nom0p8: %d kHz\n"
+			"dcfclkv_mid0p72: %d kHz\n"
+			"dcfclkv_min0p65: %d kHz\n"
+			"max_dispclk_vmax0p9: %d kHz\n"
+			"max_dispclk_vnom0p8: %d kHz\n"
+			"max_dispclk_vmid0p72: %d kHz\n"
+			"max_dispclk_vmin0p65: %d kHz\n"
+			"max_dppclk_vmax0p9: %d kHz\n"
+			"max_dppclk_vnom0p8: %d kHz\n"
+			"max_dppclk_vmid0p72: %d kHz\n"
+			"max_dppclk_vmin0p65: %d kHz\n"
+			"socclk: %d kHz\n"
+			"fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
+			"fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
+			"fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
+			"fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
+			"phyclkv_max0p9: %d kHz\n"
+			"phyclkv_nom0p8: %d kHz\n"
+			"phyclkv_mid0p72: %d kHz\n"
+			"phyclkv_min0p65: %d kHz\n"
+			"downspreading: %d %\n"
+			"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
+			"urgent_out_of_order_return_per_channel: %d Bytes\n"
+			"number_of_channels: %d\n"
+			"vmm_page_size: %d Bytes\n"
+			"dram_clock_change_latency: %d ns\n"
+			"return_bus_width: %d Bytes\n",
+			dc->dcn_soc->sr_exit_time * 1000,
+			dc->dcn_soc->sr_enter_plus_exit_time * 1000,
+			dc->dcn_soc->urgent_latency * 1000,
+			dc->dcn_soc->write_back_latency * 1000,
+			dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency,
+			dc->dcn_soc->max_request_size,
+			dc->dcn_soc->dcfclkv_max0p9 * 1000,
+			dc->dcn_soc->dcfclkv_nom0p8 * 1000,
+			dc->dcn_soc->dcfclkv_mid0p72 * 1000,
+			dc->dcn_soc->dcfclkv_min0p65 * 1000,
+			dc->dcn_soc->max_dispclk_vmax0p9 * 1000,
+			dc->dcn_soc->max_dispclk_vnom0p8 * 1000,
+			dc->dcn_soc->max_dispclk_vmid0p72 * 1000,
+			dc->dcn_soc->max_dispclk_vmin0p65 * 1000,
+			dc->dcn_soc->max_dppclk_vmax0p9 * 1000,
+			dc->dcn_soc->max_dppclk_vnom0p8 * 1000,
+			dc->dcn_soc->max_dppclk_vmid0p72 * 1000,
+			dc->dcn_soc->max_dppclk_vmin0p65 * 1000,
+			dc->dcn_soc->socclk * 1000,
+			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000,
+			dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000,
+			dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000,
+			dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000,
+			dc->dcn_soc->phyclkv_max0p9 * 1000,
+			dc->dcn_soc->phyclkv_nom0p8 * 1000,
+			dc->dcn_soc->phyclkv_mid0p72 * 1000,
+			dc->dcn_soc->phyclkv_min0p65 * 1000,
+			dc->dcn_soc->downspreading * 100,
+			dc->dcn_soc->round_trip_ping_latency_cycles,
+			dc->dcn_soc->urgent_out_of_order_return_per_channel,
+			dc->dcn_soc->number_of_channels,
+			dc->dcn_soc->vmm_page_size,
+			dc->dcn_soc->dram_clock_change_latency * 1000,
+			dc->dcn_soc->return_bus_width);
+	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+			"rob_buffer_size_in_kbyte: %d\n"
+			"det_buffer_size_in_kbyte: %d\n"
+			"dpp_output_buffer_pixels: %d\n"
+			"opp_output_buffer_lines: %d\n"
+			"pixel_chunk_size_in_kbyte: %d\n"
+			"pte_enable: %d\n"
+			"pte_chunk_size: %d kbytes\n"
+			"meta_chunk_size: %d kbytes\n"
+			"writeback_chunk_size: %d kbytes\n"
+			"odm_capability: %d\n"
+			"dsc_capability: %d\n"
+			"line_buffer_size: %d bits\n"
+			"max_line_buffer_lines: %d\n"
+			"is_line_buffer_bpp_fixed: %d\n"
+			"line_buffer_fixed_bpp: %d\n"
+			"writeback_luma_buffer_size: %d kbytes\n"
+			"writeback_chroma_buffer_size: %d kbytes\n"
+			"max_num_dpp: %d\n"
+			"max_num_writeback: %d\n"
+			"max_dchub_topscl_throughput: %d pixels/dppclk\n"
+			"max_pscl_tolb_throughput: %d pixels/dppclk\n"
+			"max_lb_tovscl_throughput: %d pixels/dppclk\n"
+			"max_vscl_tohscl_throughput: %d pixels/dppclk\n"
+			"max_hscl_ratio: %d\n"
+			"max_vscl_ratio: %d\n"
+			"max_hscl_taps: %d\n"
+			"max_vscl_taps: %d\n"
+			"pte_buffer_size_in_requests: %d\n"
+			"dispclk_ramping_margin: %d %\n"
+			"under_scan_factor: %d %\n"
+			"max_inter_dcn_tile_repeaters: %d\n"
+			"can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
+			"bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
+			"dcfclk_cstate_latency: %d\n",
+			dc->dcn_ip->rob_buffer_size_in_kbyte,
+			dc->dcn_ip->det_buffer_size_in_kbyte,
+			dc->dcn_ip->dpp_output_buffer_pixels,
+			dc->dcn_ip->opp_output_buffer_lines,
+			dc->dcn_ip->pixel_chunk_size_in_kbyte,
+			dc->dcn_ip->pte_enable,
+			dc->dcn_ip->pte_chunk_size,
+			dc->dcn_ip->meta_chunk_size,
+			dc->dcn_ip->writeback_chunk_size,
+			dc->dcn_ip->odm_capability,
+			dc->dcn_ip->dsc_capability,
+			dc->dcn_ip->line_buffer_size,
+			dc->dcn_ip->max_line_buffer_lines,
+			dc->dcn_ip->is_line_buffer_bpp_fixed,
+			dc->dcn_ip->line_buffer_fixed_bpp,
+			dc->dcn_ip->writeback_luma_buffer_size,
+			dc->dcn_ip->writeback_chroma_buffer_size,
+			dc->dcn_ip->max_num_dpp,
+			dc->dcn_ip->max_num_writeback,
+			dc->dcn_ip->max_dchub_topscl_throughput,
+			dc->dcn_ip->max_pscl_tolb_throughput,
+			dc->dcn_ip->max_lb_tovscl_throughput,
+			dc->dcn_ip->max_vscl_tohscl_throughput,
+			dc->dcn_ip->max_hscl_ratio,
+			dc->dcn_ip->max_vscl_ratio,
+			dc->dcn_ip->max_hscl_taps,
+			dc->dcn_ip->max_vscl_taps,
+			dc->dcn_ip->pte_buffer_size_in_requests,
+			dc->dcn_ip->dispclk_ramping_margin,
+			dc->dcn_ip->under_scan_factor * 100,
+			dc->dcn_ip->max_inter_dcn_tile_repeaters,
+			dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
+			dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
+			dc->dcn_ip->dcfclk_cstate_latency);
+	dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
+	dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
+	dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
+	dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
+
+	dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
+	dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
+	dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
+	dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
+
+	dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
+	dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
+	dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
+	dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
+
+	dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
+	dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
+	dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
+	dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
+
+	dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
+	dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
+	dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
+	dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
+
+	dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+	dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+	dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+	dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+	dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+	dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
+	dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency;
+	dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency;
+	dc->dml.soc.ideal_dram_bw_after_urgent_percent =
+			dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+	dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size;
+	dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading;
+	dc->dml.soc.round_trip_ping_latency_dcfclk_cycles =
+			dc->dcn_soc->round_trip_ping_latency_cycles;
+	dc->dml.soc.urgent_out_of_order_return_per_channel_bytes =
+			dc->dcn_soc->urgent_out_of_order_return_per_channel;
+	dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels;
+	dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size;
+	dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency;
+	dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width;
+
+	dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte;
+	dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte;
+	dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+	dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+	dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+	dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes;
+	dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size;
+	dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size;
+	dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size;
+	dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size;
+	dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+	dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes;
+	dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp;
+	dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size;
+	dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size;
+	dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp;
+	dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback;
+	dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput;
+	dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput;
+	dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput;
+	dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput;
+	dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+	dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+	dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+	dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+	/*pte_buffer_size_in_requests missing in dml*/
+	dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin;
+	dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor;
+	dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+	dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+		dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes;
+	dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
+		dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
+	dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
+	kernel_fpu_end();
+}

+ 1677 - 0
drivers/gpu/drm/amd/display/dc/core/dc.c

@@ -0,0 +1,1677 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bios_parser_interface.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "dpp.h"
+#include "timing_generator.h"
+#include "virtual/virtual_link_encoder.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+#include "hubp.h"
+
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+/* Destroy every link created by create_links(); NULL slots are skipped. */
+static void destroy_links(struct dc *dc)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->link_count; i++) {
+		if (NULL != dc->links[i])
+			link_destroy(&dc->links[i]);
+	}
+}
+
+/*
+ * Enumerate physical connectors from the BIOS object table and create one
+ * dc_link per connector, then append num_virtual_links virtual links (used
+ * e.g. for emulated/headless outputs).  dc->link_count is updated as links
+ * are added.  Returns false only when a virtual-link allocation fails;
+ * individual physical-link creation failures are silently skipped.
+ */
+static bool create_links(
+		struct dc *dc,
+		uint32_t num_virtual_links)
+{
+	int i;
+	int connectors_num;
+	struct dc_bios *bios = dc->ctx->dc_bios;
+
+	dc->link_count = 0;
+
+	connectors_num = bios->funcs->get_connectors_number(bios);
+
+	if (connectors_num > ENUM_ID_COUNT) {
+		dm_error(
+			"DC: Number of connectors %d exceeds maximum of %d!\n",
+			connectors_num,
+			ENUM_ID_COUNT);
+		return false;
+	}
+
+	if (connectors_num == 0 && num_virtual_links == 0) {
+		dm_error("DC: Number of connectors is zero!\n");
+	}
+
+	dm_output_to_console(
+		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
+		__func__,
+		connectors_num,
+		num_virtual_links);
+
+	for (i = 0; i < connectors_num; i++) {
+		struct link_init_data link_init_params = {0};
+		struct dc_link *link;
+
+		link_init_params.ctx = dc->ctx;
+		/* next BIOS object table connector */
+		link_init_params.connector_index = i;
+		link_init_params.link_index = dc->link_count;
+		link_init_params.dc = dc;
+		link = link_create(&link_init_params);
+
+		if (link) {
+			dc->links[dc->link_count] = link;
+			link->dc = dc;
+			++dc->link_count;
+		}
+	}
+
+	for (i = 0; i < num_virtual_links; i++) {
+		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+		struct encoder_init_data enc_init = {0};
+
+		if (link == NULL) {
+			BREAK_TO_DEBUGGER();
+			goto failed_alloc;
+		}
+
+		link->ctx = dc->ctx;
+		link->dc = dc;
+		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
+		link->link_id.type = OBJECT_TYPE_CONNECTOR;
+		link->link_id.id = CONNECTOR_ID_VIRTUAL;
+		link->link_id.enum_id = ENUM_ID_1;
+		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+		/* The encoder allocation was previously used unchecked by
+		 * virtual_link_encoder_construct(), giving a NULL dereference
+		 * on allocation failure.  Bail out cleanly instead.
+		 */
+		if (link->link_enc == NULL) {
+			BREAK_TO_DEBUGGER();
+			kfree(link);
+			goto failed_alloc;
+		}
+
+		enc_init.ctx = dc->ctx;
+		enc_init.channel = CHANNEL_ID_UNKNOWN;
+		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+		enc_init.transmitter = TRANSMITTER_UNKNOWN;
+		enc_init.connector = link->link_id;
+		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+		enc_init.encoder.enum_id = ENUM_ID_1;
+		virtual_link_encoder_construct(link->link_enc, &enc_init);
+
+		link->link_index = dc->link_count;
+		dc->links[dc->link_count] = link;
+		dc->link_count++;
+	}
+
+	return true;
+
+failed_alloc:
+	return false;
+}
+
+/*
+ * Program dynamic refresh (DRR) vmin/vmax on every pipe driving the first
+ * stream and rebuild/re-send its info frame.  num_streams is accepted for
+ * future multi-stream support but only streams[0] is used (see TODO).
+ * Returns true if at least one matching pipe was updated.
+ */
+static bool stream_adjust_vmin_vmax(struct dc *dc,
+		struct dc_stream_state **streams, int num_streams,
+		int vmin, int vmax)
+{
+	/* TODO: Support multiple streams */
+	struct dc_stream_state *stream = streams[0];
+	int i = 0;
+	bool ret = false;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+			/* set_drr takes an array of pipe pointers; pass a
+			 * one-element array built from this pipe */
+			dc->hwss.set_drr(&pipe, 1, vmin, vmax);
+
+			/* build and update the info frame */
+			resource_build_info_frame(pipe);
+			dc->hwss.update_info_frame(pipe);
+
+			ret = true;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Read the current and nominal CRTC vertical position for the first stream.
+ * Only streams[0] is consulted (multi-stream is a TODO).  Returns true if a
+ * pipe with an active stream encoder was found; *v_pos / *nom_v_pos are only
+ * written in that case.
+ */
+static bool stream_get_crtc_position(struct dc *dc,
+		struct dc_stream_state **streams, int num_streams,
+		unsigned int *v_pos, unsigned int *nom_v_pos)
+{
+	/* TODO: Support multiple streams */
+	struct dc_stream_state *stream = streams[0];
+	int i = 0;
+	bool ret = false;
+	struct crtc_position position;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe =
+				&dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+			/* get_position takes an array of pipe pointers */
+			dc->hwss.get_position(&pipe, 1, &position);
+
+			*v_pos = position.vertical_count;
+			*nom_v_pos = position.nominal_vcount;
+			ret = true;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Re-program the gamut remap matrix on every pipe currently driving the
+ * given stream.  Returns true if at least one pipe matched.
+ */
+static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+{
+	int i = 0;
+	bool ret = false;
+	struct pipe_ctx *pipes;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+			dc->hwss.program_gamut_remap(pipes);
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Program the output color-space conversion matrix (from the stream's
+ * csc_color_matrix) on every pipe driving the given stream.  Returns true
+ * if at least one pipe matched.
+ */
+static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+{
+	int i = 0;
+	bool ret = false;
+	struct pipe_ctx *pipes;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (dc->current_state->res_ctx.pipe_ctx[i].stream
+				== stream) {
+
+			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+			dc->hwss.program_csc_matrix(pipes,
+			stream->output_color_space,
+			stream->csc_color_matrix.matrix);
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Collect every pipe driving any of the given streams and hand the whole
+ * set to the hw sequencer's static-screen control (used for e.g. memory
+ * self-refresh entry triggers).
+ */
+static void set_static_screen_events(struct dc *dc,
+		struct dc_stream_state **streams,
+		int num_streams,
+		const struct dc_static_screen_events *events)
+{
+	int i = 0;
+	int j = 0;
+	struct pipe_ctx *pipes_affected[MAX_PIPES];
+	int num_pipes_affected = 0;
+
+	for (i = 0; i < num_streams; i++) {
+		struct dc_stream_state *stream = streams[i];
+
+		for (j = 0; j < MAX_PIPES; j++) {
+			if (dc->current_state->res_ctx.pipe_ctx[j].stream
+					== stream) {
+				pipes_affected[num_pipes_affected++] =
+						&dc->current_state->res_ctx.pipe_ctx[j];
+			}
+		}
+	}
+
+	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+}
+
+/*
+ * Forward DP lane drive settings to the given link.  The link must belong
+ * to this dc instance; if it is not found we assert and return rather than
+ * indexing dc->links[] out of bounds (the original fell through after the
+ * failed ASSERT_CRITICAL and dereferenced dc->links[dc->link_count]).
+ */
+static void set_drive_settings(struct dc *dc,
+		struct link_training_settings *lt_settings,
+		const struct dc_link *link)
+{
+
+	int i;
+
+	for (i = 0; i < dc->link_count; i++) {
+		if (dc->links[i] == link)
+			break;
+	}
+
+	if (i >= dc->link_count) {
+		ASSERT_CRITICAL(false);
+		return;
+	}
+
+	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+/* Run DP link training on every link owned by this dc instance. */
+static void perform_link_training(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		bool skip_video_pattern)
+{
+	int i;
+
+	for (i = 0; i < dc->link_count; i++)
+		dc_link_dp_perform_link_training(
+			dc->links[i],
+			link_setting,
+			skip_video_pattern);
+}
+
+/*
+ * Store the caller-preferred DP link settings on the link and retrain it
+ * immediately (test/compliance path).
+ */
+static void set_preferred_link_settings(struct dc *dc,
+		struct dc_link_settings *link_setting,
+		struct dc_link *link)
+{
+	link->preferred_link_setting = *link_setting;
+	dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+/* Thin wrapper: enable hot-plug detection on a DP link. */
+static void enable_hpd(const struct dc_link *link)
+{
+	dc_link_dp_enable_hpd(link);
+}
+
+/* Thin wrapper: disable hot-plug detection on a DP link. */
+static void disable_hpd(const struct dc_link *link)
+{
+	dc_link_dp_disable_hpd(link);
+}
+
+
+/*
+ * Program a DP test pattern (optionally a custom one of cust_pattern_size
+ * bytes) on the link.  A NULL link is a silent no-op.
+ */
+static void set_test_pattern(
+		struct dc_link *link,
+		enum dp_test_pattern test_pattern,
+		const struct link_training_settings *p_link_settings,
+		const unsigned char *p_custom_pattern,
+		unsigned int cust_pattern_size)
+{
+	if (link != NULL)
+		dc_link_dp_set_test_pattern(
+			link,
+			test_pattern,
+			p_link_settings,
+			p_custom_pattern,
+			cust_pattern_size);
+}
+
+/*
+ * Change the stream's dither option and immediately re-program the OPP
+ * bit-depth reduction on the first pipe found driving the stream.  Silently
+ * returns if no pipe matches or the option is out of range.
+ */
+static void set_dither_option(struct dc_stream_state *stream,
+		enum dc_dither_option option)
+{
+	struct bit_depth_reduction_params params;
+	struct dc_link *link = stream->status.link;
+	struct pipe_ctx *pipes = NULL;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
+				stream) {
+			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
+			break;
+		}
+	}
+
+	memset(&params, 0, sizeof(params));
+	if (!pipes)
+		return;
+	if (option > DITHER_OPTION_MAX)
+		return;
+
+	/* persist the option, then rebuild the reduction params from it */
+	stream->dither_option = option;
+
+	resource_build_bit_depth_reduction_params(stream,
+				&params);
+	stream->bit_depth_params = params;
+	pipes->stream_res.opp->funcs->
+		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+}
+
+/*
+ * Apply a DPMS on/off transition to the stream: disables the link output
+ * (keeping acquired resources) or re-enables it.  A no-op when the stream
+ * is already in the requested state; asserts if the stream has no pipe.
+ */
+void set_dpms(
+	struct dc *dc,
+	struct dc_stream_state *stream,
+	bool dpms_off)
+{
+	struct pipe_ctx *pipe_ctx = NULL;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+			break;
+		}
+	}
+
+	if (!pipe_ctx) {
+		ASSERT(0);
+		return;
+	}
+
+	if (stream->dpms_off != dpms_off) {
+		stream->dpms_off = dpms_off;
+		if (dpms_off)
+			core_link_disable_stream(pipe_ctx,
+					KEEP_ACQUIRED_RESOURCE);
+		else
+			core_link_enable_stream(dc->current_state, pipe_ctx);
+	}
+}
+
+/*
+ * Populate the public stream/link function tables with the static helpers
+ * above.  adjust_vmin_vmax is only wired up when the hw sequencer provides
+ * set_drr (i.e. the ASIC supports dynamic refresh).
+ */
+static void allocate_dc_stream_funcs(struct dc  *dc)
+{
+	if (dc->hwss.set_drr != NULL) {
+		dc->stream_funcs.adjust_vmin_vmax =
+				stream_adjust_vmin_vmax;
+	}
+
+	dc->stream_funcs.set_static_screen_events =
+			set_static_screen_events;
+
+	dc->stream_funcs.get_crtc_position =
+			stream_get_crtc_position;
+
+	dc->stream_funcs.set_gamut_remap =
+			set_gamut_remap;
+
+	dc->stream_funcs.program_csc_matrix =
+			program_csc_matrix;
+
+	dc->stream_funcs.set_dither_option =
+			set_dither_option;
+
+	dc->stream_funcs.set_dpms =
+			set_dpms;
+
+	dc->link_funcs.set_drive_settings =
+			set_drive_settings;
+
+	dc->link_funcs.perform_link_training =
+			perform_link_training;
+
+	dc->link_funcs.set_preferred_link_settings =
+			set_preferred_link_settings;
+
+	dc->link_funcs.enable_hpd =
+			enable_hpd;
+
+	dc->link_funcs.disable_hpd =
+			disable_hpd;
+
+	dc->link_funcs.set_test_pattern =
+			set_test_pattern;
+}
+
+/*
+ * Tear down everything construct() built, in reverse order.  Also used as
+ * the error-path cleanup in construct(), so every free is guarded against
+ * partially-initialized state (kfree(NULL) is a no-op).
+ */
+static void destruct(struct dc *dc)
+{
+	dc_release_state(dc->current_state);
+	dc->current_state = NULL;
+
+	destroy_links(dc);
+
+	dc_destroy_resource_pool(dc);
+
+	if (dc->ctx->gpio_service)
+		dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+	if (dc->ctx->i2caux)
+		dal_i2caux_destroy(&dc->ctx->i2caux);
+
+	/* only destroy the bios parser if we created it (no vbios_override) */
+	if (dc->ctx->created_bios)
+		dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+	if (dc->ctx->logger)
+		dal_logger_destroy(&dc->ctx->logger);
+
+	kfree(dc->ctx);
+	dc->ctx = NULL;
+
+	kfree(dc->bw_vbios);
+	dc->bw_vbios = NULL;
+
+	kfree(dc->bw_dceip);
+	dc->bw_dceip = NULL;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+	kfree(dc->dcn_soc);
+	dc->dcn_soc = NULL;
+
+	kfree(dc->dcn_ip);
+	dc->dcn_ip = NULL;
+
+#endif
+}
+
+/*
+ * One-shot initialization of a dc instance: allocates bandwidth-calc and
+ * (optionally) DCN bounding-box structures, the dc_context, logger, BIOS
+ * parser, I2C/AUX, GPIO service, resource pool and links.  On any failure
+ * it falls through to destruct(), which tolerates partial initialization.
+ * Returns true on full success.
+ */
+static bool construct(struct dc *dc,
+		const struct dc_init_data *init_params)
+{
+	struct dal_logger *logger;
+	struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+	struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
+						  GFP_KERNEL);
+	struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
+						  GFP_KERNEL);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+	struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
+						       GFP_KERNEL);
+	struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+#endif
+
+	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+	/* each allocation is attached to dc immediately after its check so
+	 * destruct() can free whatever exists on the failure path */
+	if (!dc_dceip) {
+		dm_error("%s: failed to create dceip\n", __func__);
+		goto fail;
+	}
+
+	dc->bw_dceip = dc_dceip;
+
+	if (!dc_vbios) {
+		dm_error("%s: failed to create vbios\n", __func__);
+		goto fail;
+	}
+
+	dc->bw_vbios = dc_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+	if (!dcn_soc) {
+		dm_error("%s: failed to create dcn_soc\n", __func__);
+		goto fail;
+	}
+
+	dc->dcn_soc = dcn_soc;
+
+	if (!dcn_ip) {
+		dm_error("%s: failed to create dcn_ip\n", __func__);
+		goto fail;
+	}
+
+	dc->dcn_ip = dcn_ip;
+#endif
+
+	if (!dc_ctx) {
+		dm_error("%s: failed to create ctx\n", __func__);
+		goto fail;
+	}
+
+	dc->current_state = dc_create_state();
+
+	if (!dc->current_state) {
+		dm_error("%s: failed to create validate ctx\n", __func__);
+		goto fail;
+	}
+
+	dc_ctx->cgs_device = init_params->cgs_device;
+	dc_ctx->driver_context = init_params->driver;
+	dc_ctx->dc = dc;
+	dc_ctx->asic_id = init_params->asic_id;
+
+	/* Create logger */
+	logger = dal_logger_create(dc_ctx, init_params->log_mask);
+
+	if (!logger) {
+		/* can *not* call logger. call base driver 'print error' */
+		dm_error("%s: failed to create Logger!\n", __func__);
+		goto fail;
+	}
+	dc_ctx->logger = logger;
+	dc->ctx = dc_ctx;
+	dc->ctx->dce_environment = init_params->dce_environment;
+
+	dc_version = resource_parse_asic_id(init_params->asic_id);
+	dc->ctx->dce_version = dc_version;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
+#endif
+	/* Resource should construct all asic specific resources.
+	 * This should be the only place where we need to parse the asic id
+	 */
+	if (init_params->vbios_override)
+		dc_ctx->dc_bios = init_params->vbios_override;
+	else {
+		/* Create BIOS parser */
+		struct bp_init_data bp_init_data;
+
+		bp_init_data.ctx = dc_ctx;
+		bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+		dc_ctx->dc_bios = dal_bios_parser_create(
+				&bp_init_data, dc_version);
+
+		if (!dc_ctx->dc_bios) {
+			ASSERT_CRITICAL(false);
+			goto fail;
+		}
+
+		/* remember ownership so destruct() knows to destroy it */
+		dc_ctx->created_bios = true;
+		}
+
+	/* Create I2C AUX */
+	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
+
+	if (!dc_ctx->i2caux) {
+		ASSERT_CRITICAL(false);
+		goto fail;
+	}
+
+	/* Create GPIO service */
+	dc_ctx->gpio_service = dal_gpio_service_create(
+			dc_version,
+			dc_ctx->dce_environment,
+			dc_ctx);
+
+	if (!dc_ctx->gpio_service) {
+		ASSERT_CRITICAL(false);
+		goto fail;
+	}
+
+	dc->res_pool = dc_create_resource_pool(
+			dc,
+			init_params->num_virtual_links,
+			dc_version,
+			init_params->asic_id);
+	if (!dc->res_pool)
+		goto fail;
+
+	dc_resource_state_construct(dc, dc->current_state);
+
+	if (!create_links(dc, init_params->num_virtual_links))
+		goto fail;
+
+	allocate_dc_stream_funcs(dc);
+
+	return true;
+
+fail:
+
+	destruct(dc);
+	return false;
+}
+
+/*
+ * Disable planes belonging to streams that exist in the current state but
+ * not in the incoming context ("dangling" streams), by committing a copy
+ * of the current state with those planes removed.  The copy then becomes
+ * the current state.  Silently returns if the temp state can't be built.
+ */
+static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+{
+	int i, j;
+	struct dc_state *dangling_context = dc_create_state();
+	struct dc_state *current_ctx;
+
+	if (dangling_context == NULL)
+		return;
+
+	dc_resource_state_copy_construct(dc->current_state, dangling_context);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct dc_stream_state *old_stream =
+				dc->current_state->res_ctx.pipe_ctx[i].stream;
+		bool should_disable = true;
+
+		for (j = 0; j < context->stream_count; j++) {
+			if (old_stream == context->streams[j]) {
+				should_disable = false;
+				break;
+			}
+		}
+		if (should_disable && old_stream) {
+			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+		}
+	}
+
+	current_ctx = dc->current_state;
+	dc->current_state = dangling_context;
+	dc_release_state(current_ctx);
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+/*
+ * Allocate and fully initialize a dc instance: software construction via
+ * construct(), then hardware init, then derive caps (max streams excludes
+ * the underlay pipe; bounded by stream encoder count).  Returns NULL on
+ * any failure.
+ */
+struct dc *dc_create(const struct dc_init_data *init_params)
+ {
+	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+	unsigned int full_pipe_count;
+
+	if (NULL == dc)
+		goto alloc_fail;
+
+	if (false == construct(dc, init_params))
+		goto construct_fail;
+
+	/*TODO: separate HW and SW initialization*/
+	dc->hwss.init_hw(dc);
+
+	full_pipe_count = dc->res_pool->pipe_count;
+	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+		full_pipe_count--;
+	dc->caps.max_streams = min(
+			full_pipe_count,
+			dc->res_pool->stream_enc_count);
+
+	dc->caps.max_links = dc->link_count;
+	dc->caps.max_audios = dc->res_pool->audio_count;
+
+	dc->config = init_params->flags;
+
+	dm_logger_write(dc->ctx->logger, LOG_DC,
+			"Display Core initialized\n");
+
+
+	/* TODO: missing feature to be enabled */
+	dc->debug.disable_dfs_bypass = true;
+
+	return dc;
+
+construct_fail:
+	kfree(dc);
+
+alloc_fail:
+	return NULL;
+}
+
+/*
+ * Destroy a dc instance created by dc_create() and NULL the caller's
+ * pointer.  Tolerates a NULL pointer or an already-destroyed (*dc == NULL)
+ * instance; the original dereferenced *dc unconditionally.
+ */
+void dc_destroy(struct dc **dc)
+{
+	if (dc == NULL || *dc == NULL)
+		return;
+	destruct(*dc);
+	kfree(*dc);
+	*dc = NULL;
+}
+
+/*
+ * Group top pipes whose streams have synchronizable timings and enable HW
+ * timing synchronization per group.  Within a group the first unblanked
+ * pipe is promoted to master (index 0) and the remaining unblanked pipes
+ * are dropped (already in sync).  Bottom pipes and empty pipes are skipped.
+ */
+static void program_timing_sync(
+		struct dc *dc,
+		struct dc_state *ctx)
+{
+	int i, j;
+	int group_index = 0;
+	int pipe_count = dc->res_pool->pipe_count;
+	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+	for (i = 0; i < pipe_count; i++) {
+		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
+			continue;
+
+		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+	}
+
+	for (i = 0; i < pipe_count; i++) {
+		int group_size = 1;
+		struct pipe_ctx *pipe_set[MAX_PIPES];
+
+		if (!unsynced_pipes[i])
+			continue;
+
+		pipe_set[0] = unsynced_pipes[i];
+		unsynced_pipes[i] = NULL;
+
+		/* Add tg to the set, search rest of the tg's for ones with
+		 * same timing, add all tgs with same timing to the group
+		 */
+		for (j = i + 1; j < pipe_count; j++) {
+			if (!unsynced_pipes[j])
+				continue;
+
+			if (resource_are_streams_timing_synchronizable(
+					unsynced_pipes[j]->stream,
+					pipe_set[0]->stream)) {
+				pipe_set[group_size] = unsynced_pipes[j];
+				unsynced_pipes[j] = NULL;
+				group_size++;
+			}
+		}
+
+		/* set first unblanked pipe as master */
+		for (j = 0; j < group_size; j++) {
+			struct pipe_ctx *temp;
+
+			if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+				if (j == 0)
+					break;
+
+				temp = pipe_set[0];
+				pipe_set[0] = pipe_set[j];
+				pipe_set[j] = temp;
+				break;
+			}
+		}
+
+		/* remove any other unblanked pipes as they have already been synced
+		 * (note: j continues from the master-search loop above) */
+		for (j = j + 1; j < group_size; j++) {
+			if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+				group_size--;
+				pipe_set[j] = pipe_set[group_size];
+				j--;
+			}
+		}
+
+		if (group_size > 1) {
+			dc->hwss.enable_timing_synchronization(
+				dc, group_index, group_size, pipe_set);
+			group_index++;
+		}
+	}
+}
+
+/*
+ * Cheap inequality test between the incoming context and the current one:
+ * differs if the stream count differs or any stream pointer (order
+ * sensitive) differs.  Plane-level changes are NOT detected here.
+ */
+static bool context_changed(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	uint8_t i;
+
+	if (context->stream_count != dc->current_state->stream_count)
+		return true;
+
+	for (i = 0; i < dc->current_state->stream_count; i++) {
+		if (dc->current_state->streams[i] != context->streams[i])
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Run the hw sequencer's stereo setup on every pipe whose stream appears
+ * in streams[].  Uses the given context, or the current state when context
+ * is NULL.  Always returns true (no failure path today).
+ */
+bool dc_enable_stereo(
+	struct dc *dc,
+	struct dc_state *context,
+	struct dc_stream_state *streams[],
+	uint8_t stream_count)
+{
+	bool ret = true;
+	int i, j;
+	struct pipe_ctx *pipe;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (context != NULL)
+			pipe = &context->res_ctx.pipe_ctx[i];
+		else
+			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+		for (j = 0 ; pipe && j < stream_count; j++)  {
+			if (streams[j] && streams[j] == pipe->stream &&
+				dc->hwss.setup_stereo)
+				dc->hwss.setup_stereo(pipe, dc);
+		}
+	}
+
+	return ret;
+}
+
+
+/*
+ * Applies given context to HW and copy it into current context.
+ * It's up to the user to release the src context afterwards.
+ */
+/*
+ * Applies given context to HW and copy it into current context.
+ * It's up to the user to release the src context afterwards.
+ *
+ * Sequence: disable dangling planes, ensure accelerated mode, program
+ * front ends + stereo per stream, wait for MPCC disconnect, apply context
+ * to HW, sync timings, enable stereo, unlock top pipes, then swap the
+ * committed context in as current (with refcount transfer).
+ */
+static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+{
+	struct dc_bios *dcb = dc->ctx->dc_bios;
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	struct pipe_ctx *pipe;
+	int i, j, k, l;
+	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+
+	disable_dangling_plane(dc, context);
+
+	for (i = 0; i < context->stream_count; i++)
+		dc_streams[i] =  context->streams[i];
+
+	if (!dcb->funcs->is_accelerated_mode(dcb))
+		dc->hwss.enable_accelerated_mode(dc);
+
+	for (i = 0; i < context->stream_count; i++) {
+		const struct dc_sink *sink = context->streams[i]->sink;
+
+		dc->hwss.apply_ctx_for_surface(
+				dc, context->streams[i],
+				context->stream_status[i].plane_count,
+				context);
+
+		/*
+		 * enable stereo
+		 * TODO rework dc_enable_stereo call to work with validation sets?
+		 */
+		for (k = 0; k < MAX_PIPES; k++) {
+			pipe = &context->res_ctx.pipe_ctx[k];
+
+			for (l = 0 ; pipe && l < context->stream_count; l++)  {
+				if (context->streams[l] &&
+					context->streams[l] == pipe->stream &&
+					dc->hwss.setup_stereo)
+					dc->hwss.setup_stereo(pipe, dc);
+			}
+		}
+
+		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
+				context->streams[i]->timing.h_addressable,
+				context->streams[i]->timing.v_addressable,
+				context->streams[i]->timing.h_total,
+				context->streams[i]->timing.v_total,
+				context->streams[i]->timing.pix_clk_khz);
+	}
+
+	dc->hwss.ready_shared_resources(dc, context);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+	}
+	result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+	program_timing_sync(dc, context);
+
+	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+	/* unlock only top pipes; bottom pipes follow their top pipe */
+	for (i = 0; i < context->stream_count; i++) {
+		for (j = 0; j < MAX_PIPES; j++) {
+			pipe = &context->res_ctx.pipe_ctx[j];
+
+			if (!pipe->top_pipe && pipe->stream == context->streams[i])
+				dc->hwss.pipe_control_lock(dc, pipe, false);
+		}
+	}
+
+	dc_release_state(dc->current_state);
+
+	dc->current_state = context;
+
+	dc_retain_state(dc->current_state);
+
+	dc->hwss.optimize_shared_resources(dc);
+
+	return result;
+}
+
+/*
+ * Commit a validated context to hardware.  Returns true on success; a
+ * context identical to the current one is a successful no-op.  (The
+ * original returned the enum value DC_OK from this bool function on the
+ * no-op path - implicit enum->bool conversion; made explicit here.)
+ */
+bool dc_commit_state(struct dc *dc, struct dc_state *context)
+{
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	int i;
+
+	if (false == context_changed(dc, context))
+		return true;
+
+	dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
+				__func__, context->stream_count);
+
+	for (i = 0; i < context->stream_count; i++) {
+		struct dc_stream_state *stream = context->streams[i];
+
+		dc_stream_log(stream,
+				dc->ctx->logger,
+				LOG_DC);
+	}
+
+	result = dc_commit_state_no_check(dc, context);
+
+	return (result == DC_OK);
+}
+
+
+/*
+ * Post-flip cleanup: power down front ends that no longer have a stream or
+ * plane, then let the clock/bandwidth manager drop to the required level.
+ * Always returns true.
+ */
+bool dc_post_update_surfaces_to_stream(struct dc *dc)
+{
+	int i;
+	struct dc_state *context = dc->current_state;
+
+	post_surface_trace(dc);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++)
+		if (context->res_ctx.pipe_ctx[i].stream == NULL
+				|| context->res_ctx.pipe_ctx[i].plane_state == NULL)
+			dc->hwss.power_down_front_end(dc, i);
+
+	/* 3rd param should be true, temp w/a for RV*/
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
+#else
+	dc->hwss.set_bandwidth(dc, context, true);
+#endif
+	return true;
+}
+
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ *
+ * Adapter: converts an array of plane states into dc_surface_update /
+ * dc_stream_update structures and forwards them to
+ * dc_commit_updates_for_stream().  All temporaries are heap-allocated
+ * (except updates[]) and freed before returning.  Returns false only on
+ * allocation failure.
+ */
+bool dc_commit_planes_to_stream(
+		struct dc *dc,
+		struct dc_plane_state **plane_states,
+		uint8_t new_plane_count,
+		struct dc_stream_state *dc_stream,
+		struct dc_state *state)
+{
+	/* no need to dynamically allocate this. it's pretty small */
+	struct dc_surface_update updates[MAX_SURFACES];
+	struct dc_flip_addrs *flip_addr;
+	struct dc_plane_info *plane_info;
+	struct dc_scaling_info *scaling_info;
+	int i;
+	struct dc_stream_update *stream_update =
+			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+
+	if (!stream_update) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+			    GFP_KERNEL);
+	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+			     GFP_KERNEL);
+	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+			       GFP_KERNEL);
+
+	if (!flip_addr || !plane_info || !scaling_info) {
+		/* kfree(NULL) is safe; free whichever allocations succeeded */
+		kfree(flip_addr);
+		kfree(plane_info);
+		kfree(scaling_info);
+		kfree(stream_update);
+		return false;
+	}
+
+	memset(updates, 0, sizeof(updates));
+
+	stream_update->src = dc_stream->src;
+	stream_update->dst = dc_stream->dst;
+	stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+	for (i = 0; i < new_plane_count; i++) {
+		updates[i].surface = plane_states[i];
+		updates[i].gamma =
+			(struct dc_gamma *)plane_states[i]->gamma_correction;
+		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+		flip_addr[i].address = plane_states[i]->address;
+		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+		plane_info[i].color_space = plane_states[i]->color_space;
+		plane_info[i].format = plane_states[i]->format;
+		plane_info[i].plane_size = plane_states[i]->plane_size;
+		plane_info[i].rotation = plane_states[i]->rotation;
+		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+		plane_info[i].stereo_format = plane_states[i]->stereo_format;
+		plane_info[i].tiling_info = plane_states[i]->tiling_info;
+		plane_info[i].visible = plane_states[i]->visible;
+		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+		plane_info[i].dcc = plane_states[i]->dcc;
+		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+		scaling_info[i].src_rect = plane_states[i]->src_rect;
+		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+		updates[i].flip_addr = &flip_addr[i];
+		updates[i].plane_info = &plane_info[i];
+		updates[i].scaling_info = &scaling_info[i];
+	}
+
+	dc_commit_updates_for_stream(
+			dc,
+			updates,
+			new_plane_count,
+			dc_stream, stream_update, plane_states, state);
+
+	kfree(flip_addr);
+	kfree(plane_info);
+	kfree(scaling_info);
+	kfree(stream_update);
+	return true;
+}
+
+/*
+ * Allocate a zeroed dc_state with refcount 1.  Release with
+ * dc_release_state().  Returns NULL on allocation failure.
+ */
+struct dc_state *dc_create_state(void)
+{
+	struct dc_state *context = kzalloc(sizeof(struct dc_state),
+					   GFP_KERNEL);
+
+	if (!context)
+		return NULL;
+
+	kref_init(&context->refcount);
+	return context;
+}
+
+/* Take an additional reference on a dc_state. */
+void dc_retain_state(struct dc_state *context)
+{
+	kref_get(&context->refcount);
+}
+
+/* kref release callback: destruct the resource state and free the object. */
+static void dc_state_free(struct kref *kref)
+{
+	struct dc_state *context = container_of(kref, struct dc_state, refcount);
+	dc_resource_state_destruct(context);
+	kfree(context);
+}
+
+/* Drop a reference on a dc_state; frees it when the count reaches zero. */
+void dc_release_state(struct dc_state *context)
+{
+	kref_put(&context->refcount, dc_state_free);
+}
+
+/* True if any pipe in the context currently references this plane state. */
+static bool is_surface_in_context(
+		const struct dc_state *context,
+		const struct dc_plane_state *plane_state)
+{
+	int j;
+
+	for (j = 0; j < MAX_PIPES; j++) {
+		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+		if (plane_state == pipe_ctx->plane_state) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Bits per pixel for a surface pixel format.  NOTE: the unsigned return
+ * means the -1 on the default path wraps to UINT_MAX; callers only compare
+ * results for inequality, so this is harmless but worth knowing.
+ */
+static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
+{
+	switch (format) {
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		return 12;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+		return 16;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		return 32;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		return 64;
+	default:
+		ASSERT_CRITICAL(false);
+		return -1;
+	}
+}
+
+/*
+ * Classify how disruptive a plane_info change is: FULL when a parameter
+ * that needs revalidation/DML changed (size, rotation, bpp, non-linear
+ * swizzle, ...), MED otherwise.  FAST when no plane_info was supplied.
+ */
+static enum surface_update_type get_plane_info_update_type(
+		const struct dc_surface_update *u,
+		int surface_index)
+{
+	struct dc_plane_info temp_plane_info;
+	memset(&temp_plane_info, 0, sizeof(temp_plane_info));
+
+	if (!u->plane_info)
+		return UPDATE_TYPE_FAST;
+
+	temp_plane_info = *u->plane_info;
+
+	/* Copy all parameters that will cause a full update
+	 * from current surface, the rest of the parameters
+	 * from provided plane configuration.
+	 * Perform memory compare and special validation
+	 * for those that can cause fast/medium updates
+	 */
+
+	/* Full update parameters */
+	temp_plane_info.color_space = u->surface->color_space;
+	temp_plane_info.dcc = u->surface->dcc;
+	temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
+	temp_plane_info.plane_size = u->surface->plane_size;
+	temp_plane_info.rotation = u->surface->rotation;
+	temp_plane_info.stereo_format = u->surface->stereo_format;
+
+	/* visibility of the primary plane (index 0) triggers full update;
+	 * other planes' visibility changes are medium */
+	if (surface_index == 0)
+		temp_plane_info.visible = u->plane_info->visible;
+	else
+		temp_plane_info.visible = u->surface->visible;
+
+	if (memcmp(u->plane_info, &temp_plane_info,
+			sizeof(struct dc_plane_info)) != 0)
+		return UPDATE_TYPE_FULL;
+
+	if (pixel_format_to_bpp(u->plane_info->format) !=
+			pixel_format_to_bpp(u->surface->format)) {
+		/* different bytes per element will require full bandwidth
+		 * and DML calculation
+		 */
+		return UPDATE_TYPE_FULL;
+	}
+
+	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+			sizeof(union dc_tiling_info)) != 0) {
+		/* todo: below are HW dependent, we should add a hook to
+		 * DCE/N resource and validated there.
+		 */
+		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+			/* swizzled mode requires RQ to be setup properly,
+			 * thus need to run DML to calculate RQ settings
+			 */
+			return UPDATE_TYPE_FULL;
+		}
+	}
+
+	return UPDATE_TYPE_MED;
+}
+
+/*
+ * Classify a scaling change: FULL when any rect size changed (scaling
+ * ratios change, requires revalidation), MED when only positions moved,
+ * FAST when no scaling info was supplied or nothing changed.
+ */
+static enum surface_update_type  get_scaling_info_update_type(
+		const struct dc_surface_update *u)
+{
+	if (!u->scaling_info)
+		return UPDATE_TYPE_FAST;
+
+	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+			|| u->scaling_info->src_rect.height != u->surface->src_rect.height
+			|| u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
+			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
+		return UPDATE_TYPE_FULL;
+
+	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
+			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
+			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
+			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+		return UPDATE_TYPE_MED;
+
+	return UPDATE_TYPE_FAST;
+}
+
+/*
+ * Determine the overall update type for a single surface update: FULL if
+ * the surface is new to the context, otherwise the max of the plane-info
+ * and scaling classifications, promoted to at least MED when a transfer
+ * function or HDR metadata change is included.
+ */
+static enum surface_update_type det_surface_update(
+		const struct dc *dc,
+		const struct dc_surface_update *u,
+		int surface_index)
+{
+	const struct dc_state *context = dc->current_state;
+	enum surface_update_type type = UPDATE_TYPE_FAST;
+	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+	if (!is_surface_in_context(context, u->surface))
+		return UPDATE_TYPE_FULL;
+
+	type = get_plane_info_update_type(u, surface_index);
+	if (overall_type < type)
+		overall_type = type;
+
+	type = get_scaling_info_update_type(u);
+	if (overall_type < type)
+		overall_type = type;
+
+	if (u->in_transfer_func ||
+		u->hdr_static_metadata) {
+		if (overall_type < UPDATE_TYPE_MED)
+			overall_type = UPDATE_TYPE_MED;
+	}
+
+	return overall_type;
+}
+
+/*
+ * Classify a whole flip: FULL when plane count changed, a stream update is
+ * present, or any individual surface needs FULL; otherwise the max over
+ * the per-surface classifications.
+ */
+enum surface_update_type dc_check_update_surfaces_for_stream(
+		struct dc *dc,
+		struct dc_surface_update *updates,
+		int surface_count,
+		struct dc_stream_update *stream_update,
+		const struct dc_stream_status *stream_status)
+{
+	int i;
+	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+	if (stream_status == NULL || stream_status->plane_count != surface_count)
+		return UPDATE_TYPE_FULL;
+
+	if (stream_update)
+		return UPDATE_TYPE_FULL;
+
+	for (i = 0 ; i < surface_count; i++) {
+		enum surface_update_type type =
+				det_surface_update(dc, &updates[i], i);
+
+		if (type == UPDATE_TYPE_FULL)
+			return type;
+
+		if (overall_type < type)
+			overall_type = type;
+	}
+
+	return overall_type;
+}
+
+static struct dc_stream_status *stream_get_status(
+	struct dc_state *ctx,
+	struct dc_stream_state *stream)
+{
+	uint8_t i;
+
+	/* Status entries are stored parallel to ctx->streams[]. */
+	for (i = 0; i < ctx->stream_count; i++)
+		if (ctx->streams[i] == stream)
+			return &ctx->stream_status[i];
+
+	/* Stream not part of this state. */
+	return NULL;
+}
+
+/* Updates at or above this severity are dumped via update_surface_trace(). */
+static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+
+/*
+ * commit_planes_for_stream() - program the HW for a set of surface
+ * updates on a single stream.
+ *
+ * The sequence is order sensitive:
+ *   1. apply bandwidth/clock programming for the new context (full only),
+ *   2. lock the affected pipes,
+ *   3. reprogram front ends (full update only),
+ *   4. apply per-surface updates (flips, transfer funcs, info frames),
+ *   5. unlock the pipes in reverse order.
+ */
+static void commit_planes_for_stream(struct dc *dc,
+		struct dc_surface_update *srf_updates,
+		int surface_count,
+		struct dc_stream_state *stream,
+		struct dc_stream_update *stream_update,
+		enum surface_update_type update_type,
+		struct dc_state *context)
+{
+	int i, j;
+
+	if (update_type == UPDATE_TYPE_FULL) {
+		dc->hwss.set_bandwidth(dc, context, false);
+		context_clock_trace(dc, context);
+	}
+
+	/* Make sure pending MPCC disconnects have completed before
+	 * touching the pipes. */
+	if (update_type > UPDATE_TYPE_FAST) {
+		for (j = 0; j < dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+			dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
+		}
+	}
+
+	if (surface_count == 0) {
+		/*
+		 * In case of turning off screen, no need to program front end a second time.
+		 * just return after program front end.
+		 */
+		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
+		return;
+	}
+
+	/* Lock pipes for provided surfaces, or all active if full update*/
+	for (i = 0; i < surface_count; i++) {
+		struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+		for (j = 0; j < dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+			if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
+				continue;
+			/* only lock top pipes that actually drive a plane */
+			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+				continue;
+
+			dc->hwss.pipe_control_lock(
+					dc,
+					pipe_ctx,
+					true);
+		}
+		/* on a full update one pass over the pipes locks everything */
+		if (update_type == UPDATE_TYPE_FULL)
+			break;
+	}
+
+	/* Full fe update*/
+	for (j = 0; j < dc->res_pool->pipe_count; j++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+		if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
+			continue;
+
+		/* reprogram the front end once per top pipe with a stream */
+		if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
+			struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
+
+			dc->hwss.apply_ctx_for_surface(
+					dc, pipe_ctx->stream, stream_status->plane_count, context);
+		}
+	}
+
+	if (update_type > UPDATE_TYPE_FAST)
+		context_timing_trace(dc, &context->res_ctx);
+
+	/* Perform requested Updates */
+	for (i = 0; i < surface_count; i++) {
+		struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+		if (update_type == UPDATE_TYPE_MED)
+			dc->hwss.apply_ctx_for_surface(
+					dc, stream, surface_count, context);
+
+		for (j = 0; j < dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+			if (pipe_ctx->plane_state != plane_state)
+				continue;
+
+			if (srf_updates[i].flip_addr)
+				dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+			/* fast updates are flip-only; everything below is MED+ */
+			if (update_type == UPDATE_TYPE_FAST)
+				continue;
+
+			/* work around to program degamma regs for split pipe after set mode. */
+			if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
+					pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
+				dc->hwss.set_input_transfer_func(
+						pipe_ctx, pipe_ctx->plane_state);
+
+			if (stream_update != NULL &&
+					stream_update->out_transfer_func != NULL) {
+				dc->hwss.set_output_transfer_func(
+						pipe_ctx, pipe_ctx->stream);
+			}
+
+			if (srf_updates[i].hdr_static_metadata) {
+				resource_build_info_frame(pipe_ctx);
+				dc->hwss.update_info_frame(pipe_ctx);
+			}
+		}
+	}
+
+	/* Unlock pipes */
+	for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		for (j = 0; j < surface_count; j++) {
+			if (update_type != UPDATE_TYPE_FULL &&
+			    srf_updates[j].surface != pipe_ctx->plane_state)
+				continue;
+			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+				continue;
+
+			dc->hwss.pipe_control_lock(
+					dc,
+					pipe_ctx,
+					false);
+
+			break;
+		}
+	}
+}
+
+/*
+ * dc_commit_updates_for_stream() - commit surface updates for one stream.
+ *
+ * Classifies the update severity, builds a new dc_state for full updates
+ * (copy-constructed from @state), refreshes flip addresses and scaling
+ * parameters on the affected planes, programs the hardware via
+ * commit_planes_for_stream() and finally swaps in the new current state.
+ */
+void dc_commit_updates_for_stream(struct dc *dc,
+		struct dc_surface_update *srf_updates,
+		int surface_count,
+		struct dc_stream_state *stream,
+		struct dc_stream_update *stream_update,
+		struct dc_plane_state **plane_states,
+		struct dc_state *state)
+{
+	const struct dc_stream_status *stream_status;
+	enum surface_update_type update_type;
+	struct dc_state *context;
+	/* presumably consumed by the DC_ERROR() macro below — TODO confirm */
+	struct dc_context *dc_ctx = dc->ctx;
+	int i, j;
+
+	stream_status = dc_stream_get_status(stream);
+	context = dc->current_state;
+
+	update_type = dc_check_update_surfaces_for_stream(
+				dc, srf_updates, surface_count, stream_update, stream_status);
+
+	/* dump the update list when it reaches the trace severity */
+	if (update_type >= update_surface_trace_level)
+		update_surface_trace(dc, srf_updates, surface_count);
+
+
+	if (update_type >= UPDATE_TYPE_FULL) {
+
+		/* initialize scratch memory for building context */
+		context = dc_create_state();
+		if (context == NULL) {
+			DC_ERROR("Failed to allocate new validate context!\n");
+			return;
+		}
+
+		/* full updates operate on a copy of the caller's state */
+		dc_resource_state_copy_construct(state, context);
+	}
+
+
+	for (i = 0; i < surface_count; i++) {
+		struct dc_plane_state *surface = srf_updates[i].surface;
+
+		/* TODO: On flip we don't build the state, so it still has the
+		 * old address. Which is why we are updating the address here
+		 */
+		if (srf_updates[i].flip_addr) {
+			surface->address = srf_updates[i].flip_addr->address;
+			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
+
+		}
+
+		if (update_type >= UPDATE_TYPE_MED) {
+			/* recompute scaling for each pipe showing this plane */
+			for (j = 0; j < dc->res_pool->pipe_count; j++) {
+				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+				if (pipe_ctx->plane_state != surface)
+					continue;
+
+				resource_build_scaling_params(pipe_ctx);
+			}
+		}
+	}
+
+	commit_planes_for_stream(
+				dc,
+				srf_updates,
+				surface_count,
+				stream,
+				stream_update,
+				update_type,
+				context);
+
+	if (update_type >= UPDATE_TYPE_FULL)
+		dc_post_update_surfaces_to_stream(dc);
+
+	/* Swap in the newly built state and drop our reference on the
+	 * old one. */
+	if (dc->current_state != context) {
+
+		struct dc_state *old = dc->current_state;
+
+		dc->current_state = context;
+		dc_release_state(old);
+
+	}
+
+	return;
+
+}
+
+uint8_t dc_get_current_stream_count(struct dc *dc)
+{
+	/* Number of streams in the currently committed state. */
+	struct dc_state *state = dc->current_state;
+
+	return state->stream_count;
+}
+
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+{
+	/* Out-of-range indices yield NULL instead of reading past the
+	 * stream array. */
+	if (i >= dc->current_state->stream_count)
+		return NULL;
+
+	return dc->current_state->streams[i];
+}
+
+/*
+ * Translate an interrupt source/ext id pair into the driver's irq source
+ * enum.  Fully delegated to the irq service owned by the resource pool.
+ */
+enum dc_irq_source dc_interrupt_to_irq_source(
+		struct dc *dc,
+		uint32_t src_id,
+		uint32_t ext_id)
+{
+	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+}
+
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+{
+	/* Tolerate a NULL dc; nothing to program in that case. */
+	if (!dc)
+		return;
+
+	dal_irq_service_set(dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+	/* Guard against a NULL dc for consistency with dc_interrupt_set(),
+	 * which is reachable through the same interrupt plumbing. */
+	if (!dc)
+		return;
+
+	dal_irq_service_ack(dc->res_pool->irqs, src);
+}
+
+/*
+ * Transition the display hardware between ACPI power states.  D0 rebuilds
+ * the resource state and re-initializes HW; any other state powers down
+ * and wipes the current context (except its refcount) so that resume
+ * starts from a clean slate.
+ */
+void dc_set_power_state(
+	struct dc *dc,
+	enum dc_acpi_cm_power_state power_state)
+{
+	struct kref refcount;
+
+	switch (power_state) {
+	case DC_ACPI_CM_POWER_STATE_D0:
+		dc_resource_state_construct(dc, dc->current_state);
+
+		dc->hwss.init_hw(dc);
+		break;
+	default:
+
+		dc->hwss.power_down(dc);
+
+		/* Zero out the current context so that on resume we start with
+		 * clean state, and dc hw programming optimizations will not
+		 * cause any trouble.
+		 */
+
+		/* Preserve refcount */
+		refcount = dc->current_state->refcount;
+		dc_resource_state_destruct(dc->current_state);
+		memset(dc->current_state, 0,
+				sizeof(*dc->current_state));
+
+		/* memset cleared the refcount too; restore it so existing
+		 * references stay balanced */
+		dc->current_state->refcount = refcount;
+
+		break;
+	}
+
+}
+
+void dc_resume(struct dc *dc)
+{
+	uint32_t link_idx;
+
+	/* Bring every link back up after a power state transition. */
+	for (link_idx = 0; link_idx < dc->link_count; link_idx++)
+		core_link_resume(dc->links[link_idx]);
+}
+
+bool dc_submit_i2c(
+		struct dc *dc,
+		uint32_t link_index,
+		struct i2c_command *cmd)
+{
+	/* Route the raw i2c command through the DDC service of the
+	 * addressed link. */
+	struct ddc_service *ddc = dc->links[link_index]->ddc;
+
+	return dal_i2caux_submit_i2c_command(ddc->ctx->i2caux,
+					     ddc->ddc_pin,
+					     cmd);
+}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+	unsigned int count = dc_link->sink_count;
+
+	/* Refuse to overflow the fixed-size remote sink array. */
+	if (count >= MAX_SINKS_PER_LINK) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	/* The link keeps its own reference on the sink. */
+	dc_sink_retain(sink);
+
+	dc_link->remote_sinks[count] = sink;
+	dc_link->sink_count = count + 1;
+
+	return true;
+}
+
+/*
+ * dc_link_add_remote_sink() - create and register a remote sink on @link
+ * from a raw EDID blob of @len bytes.
+ *
+ * On success the new sink is referenced both by the caller and by the
+ * link (which takes its own reference in link_add_remote_sink_helper()).
+ * Returns NULL on failure with no references leaked.
+ */
+struct dc_sink *dc_link_add_remote_sink(
+		struct dc_link *link,
+		const uint8_t *edid,
+		int len,
+		struct dc_sink_init_data *init_data)
+{
+	struct dc_sink *dc_sink;
+	enum dc_edid_status edid_status;
+
+	/* reject EDIDs that would overflow the sink's edid buffer */
+	if (len > MAX_EDID_BUFFER_SIZE) {
+		dm_error("Max EDID buffer size breached!\n");
+		return NULL;
+	}
+
+	if (!init_data) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	if (!init_data->link) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dc_sink = dc_sink_create(init_data);
+
+	if (!dc_sink)
+		return NULL;
+
+	memmove(dc_sink->dc_edid.raw_edid, edid, len);
+	dc_sink->dc_edid.length = len;
+
+	if (!link_add_remote_sink_helper(
+			link,
+			dc_sink))
+		goto fail_add_sink;
+
+	edid_status = dm_helpers_parse_edid_caps(
+			link->ctx,
+			&dc_sink->dc_edid,
+			&dc_sink->edid_caps);
+
+	if (edid_status != EDID_OK)
+		goto fail;
+
+	return dc_sink;
+/* unwind in reverse order of construction */
+fail:
+	dc_link_remove_remote_sink(link, dc_sink);
+fail_add_sink:
+	dc_sink_release(dc_sink);
+	return NULL;
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+	int i;
+
+	if (!link->sink_count) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	for (i = 0; i < link->sink_count; i++) {
+		if (link->remote_sinks[i] != sink)
+			continue;
+
+		/* Drop the reference taken when the sink was added. */
+		dc_sink_release(sink);
+
+		/* Close the gap by shifting the remaining sinks left. */
+		for (; i < link->sink_count - 1; i++)
+			link->remote_sinks[i] = link->remote_sinks[i + 1];
+
+		link->remote_sinks[i] = NULL;
+		link->sink_count--;
+		return;
+	}
+}

+ 359 - 0
drivers/gpu/drm/amd/display/dc/core/dc_debug.c

@@ -0,0 +1,359 @@
+/*
+ * dc_debug.c
+ *
+ *  Created on: Nov 3, 2016
+ *      Author: yonsun
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+/*
+ * Conditional trace helpers.  Each macro expands in the caller's scope
+ * and therefore relies on local variables named 'dc' and 'logger' being
+ * visible at the expansion site.
+ */
+#define SURFACE_TRACE(...) do {\
+		if (dc->debug.surface_trace) \
+			dm_logger_write(logger, \
+					LOG_IF_TRACE, \
+					##__VA_ARGS__); \
+} while (0)
+
+#define TIMING_TRACE(...) do {\
+	if (dc->debug.timing_trace) \
+		dm_logger_write(logger, \
+				LOG_SYNC, \
+				##__VA_ARGS__); \
+} while (0)
+
+#define CLOCK_TRACE(...) do {\
+	if (dc->debug.clock_trace) \
+		dm_logger_write(logger, \
+				LOG_BANDWIDTH_CALCS, \
+				##__VA_ARGS__); \
+} while (0)
+
+/*
+ * Dump the full set of plane states about to be committed when
+ * dc->debug.surface_trace is enabled.  Output is formatted as C-like
+ * assignments so traces are easy to diff and replay.
+ */
+void pre_surface_trace(
+		struct dc *dc,
+		const struct dc_plane_state *const *plane_states,
+		int surface_count)
+{
+	int i;
+	struct dc  *core_dc = dc;
+	/* SURFACE_TRACE() expects 'dc' and 'logger' in scope */
+	struct dal_logger *logger =  core_dc->ctx->logger;
+
+	for (i = 0; i < surface_count; i++) {
+		const struct dc_plane_state *plane_state = plane_states[i];
+
+		SURFACE_TRACE("Planes %d:\n", i);
+
+		/* NOTE(review): quad_part is printed with %X; if quad_part is
+		 * 64-bit this truncates and mismatches the varargs — confirm
+		 * the type and consider %llx. */
+		SURFACE_TRACE(
+				"plane_state->visible = %d;\n"
+				"plane_state->flip_immediate = %d;\n"
+				"plane_state->address.type = %d;\n"
+				"plane_state->address.grph.addr.quad_part = 0x%X;\n"
+				"plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
+				"plane_state->scaling_quality.h_taps = %d;\n"
+				"plane_state->scaling_quality.v_taps = %d;\n"
+				"plane_state->scaling_quality.h_taps_c = %d;\n"
+				"plane_state->scaling_quality.v_taps_c = %d;\n",
+				plane_state->visible,
+				plane_state->flip_immediate,
+				plane_state->address.type,
+				plane_state->address.grph.addr.quad_part,
+				plane_state->address.grph.meta_addr.quad_part,
+				plane_state->scaling_quality.h_taps,
+				plane_state->scaling_quality.v_taps,
+				plane_state->scaling_quality.h_taps_c,
+				plane_state->scaling_quality.v_taps_c);
+
+		SURFACE_TRACE(
+				"plane_state->src_rect.x = %d;\n"
+				"plane_state->src_rect.y = %d;\n"
+				"plane_state->src_rect.width = %d;\n"
+				"plane_state->src_rect.height = %d;\n"
+				"plane_state->dst_rect.x = %d;\n"
+				"plane_state->dst_rect.y = %d;\n"
+				"plane_state->dst_rect.width = %d;\n"
+				"plane_state->dst_rect.height = %d;\n"
+				"plane_state->clip_rect.x = %d;\n"
+				"plane_state->clip_rect.y = %d;\n"
+				"plane_state->clip_rect.width = %d;\n"
+				"plane_state->clip_rect.height = %d;\n",
+				plane_state->src_rect.x,
+				plane_state->src_rect.y,
+				plane_state->src_rect.width,
+				plane_state->src_rect.height,
+				plane_state->dst_rect.x,
+				plane_state->dst_rect.y,
+				plane_state->dst_rect.width,
+				plane_state->dst_rect.height,
+				plane_state->clip_rect.x,
+				plane_state->clip_rect.y,
+				plane_state->clip_rect.width,
+				plane_state->clip_rect.height);
+
+		SURFACE_TRACE(
+				"plane_state->plane_size.grph.surface_size.x = %d;\n"
+				"plane_state->plane_size.grph.surface_size.y = %d;\n"
+				"plane_state->plane_size.grph.surface_size.width = %d;\n"
+				"plane_state->plane_size.grph.surface_size.height = %d;\n"
+				"plane_state->plane_size.grph.surface_pitch = %d;\n",
+				plane_state->plane_size.grph.surface_size.x,
+				plane_state->plane_size.grph.surface_size.y,
+				plane_state->plane_size.grph.surface_size.width,
+				plane_state->plane_size.grph.surface_size.height,
+				plane_state->plane_size.grph.surface_pitch);
+
+
+		SURFACE_TRACE(
+				"plane_state->tiling_info.gfx8.num_banks = %d;\n"
+				"plane_state->tiling_info.gfx8.bank_width = %d;\n"
+				"plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
+				"plane_state->tiling_info.gfx8.bank_height = %d;\n"
+				"plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
+				"plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
+				"plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
+				"plane_state->tiling_info.gfx8.tile_split = %d;\n"
+				"plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
+				"plane_state->tiling_info.gfx8.tile_mode = %d;\n"
+				"plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
+				plane_state->tiling_info.gfx8.num_banks,
+				plane_state->tiling_info.gfx8.bank_width,
+				plane_state->tiling_info.gfx8.bank_width_c,
+				plane_state->tiling_info.gfx8.bank_height,
+				plane_state->tiling_info.gfx8.bank_height_c,
+				plane_state->tiling_info.gfx8.tile_aspect,
+				plane_state->tiling_info.gfx8.tile_aspect_c,
+				plane_state->tiling_info.gfx8.tile_split,
+				plane_state->tiling_info.gfx8.tile_split_c,
+				plane_state->tiling_info.gfx8.tile_mode,
+				plane_state->tiling_info.gfx8.tile_mode_c);
+
+		SURFACE_TRACE(
+				"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
+				"plane_state->tiling_info.gfx8.array_mode = %d;\n"
+				"plane_state->color_space = %d;\n"
+				"plane_state->dcc.enable = %d;\n"
+				"plane_state->format = %d;\n"
+				"plane_state->rotation = %d;\n"
+				"plane_state->stereo_format = %d;\n",
+				plane_state->tiling_info.gfx8.pipe_config,
+				plane_state->tiling_info.gfx8.array_mode,
+				plane_state->color_space,
+				plane_state->dcc.enable,
+				plane_state->format,
+				plane_state->rotation,
+				plane_state->stereo_format);
+
+		SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
+				plane_state->tiling_info.gfx9.swizzle);
+
+		SURFACE_TRACE("\n");
+	}
+	SURFACE_TRACE("\n");
+}
+
+/*
+ * Dump the contents of a surface update list when
+ * dc->debug.surface_trace is enabled.  Only the optional sub-structs
+ * present on each update are printed.
+ */
+void update_surface_trace(
+		struct dc *dc,
+		const struct dc_surface_update *updates,
+		int surface_count)
+{
+	int i;
+	struct dc  *core_dc = dc;
+	/* SURFACE_TRACE() expects 'dc' and 'logger' in scope */
+	struct dal_logger *logger =  core_dc->ctx->logger;
+
+	for (i = 0; i < surface_count; i++) {
+		const struct dc_surface_update *update = &updates[i];
+
+		SURFACE_TRACE("Update %d\n", i);
+		if (update->flip_addr) {
+			/* NOTE(review): quad_part is printed with %X; if it is
+			 * 64-bit this truncates — confirm type, consider %llx. */
+			SURFACE_TRACE("flip_addr->address.type = %d;\n"
+					"flip_addr->address.grph.addr.quad_part = 0x%X;\n"
+					"flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
+					"flip_addr->flip_immediate = %d;\n",
+					update->flip_addr->address.type,
+					update->flip_addr->address.grph.addr.quad_part,
+					update->flip_addr->address.grph.meta_addr.quad_part,
+					update->flip_addr->flip_immediate);
+		}
+
+		if (update->plane_info) {
+			/* Fix: stereo_format was passed as an argument without
+			 * a matching conversion specifier, leaving the varargs
+			 * list mismatched and the value never printed. */
+			SURFACE_TRACE(
+					"plane_info->color_space = %d;\n"
+					"plane_info->format = %d;\n"
+					"plane_info->plane_size.grph.surface_pitch = %d;\n"
+					"plane_info->plane_size.grph.surface_size.height = %d;\n"
+					"plane_info->plane_size.grph.surface_size.width = %d;\n"
+					"plane_info->plane_size.grph.surface_size.x = %d;\n"
+					"plane_info->plane_size.grph.surface_size.y = %d;\n"
+					"plane_info->rotation = %d;\n"
+					"plane_info->stereo_format = %d;\n",
+					update->plane_info->color_space,
+					update->plane_info->format,
+					update->plane_info->plane_size.grph.surface_pitch,
+					update->plane_info->plane_size.grph.surface_size.height,
+					update->plane_info->plane_size.grph.surface_size.width,
+					update->plane_info->plane_size.grph.surface_size.x,
+					update->plane_info->plane_size.grph.surface_size.y,
+					update->plane_info->rotation,
+					update->plane_info->stereo_format);
+
+			SURFACE_TRACE(
+					"plane_info->tiling_info.gfx8.num_banks = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_width = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_width_c = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_height = %d;\n"
+					"plane_info->tiling_info.gfx8.bank_height_c = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_aspect = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_split = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_split_c = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_mode = %d;\n"
+					"plane_info->tiling_info.gfx8.tile_mode_c = %d;\n",
+					update->plane_info->tiling_info.gfx8.num_banks,
+					update->plane_info->tiling_info.gfx8.bank_width,
+					update->plane_info->tiling_info.gfx8.bank_width_c,
+					update->plane_info->tiling_info.gfx8.bank_height,
+					update->plane_info->tiling_info.gfx8.bank_height_c,
+					update->plane_info->tiling_info.gfx8.tile_aspect,
+					update->plane_info->tiling_info.gfx8.tile_aspect_c,
+					update->plane_info->tiling_info.gfx8.tile_split,
+					update->plane_info->tiling_info.gfx8.tile_split_c,
+					update->plane_info->tiling_info.gfx8.tile_mode,
+					update->plane_info->tiling_info.gfx8.tile_mode_c);
+
+			SURFACE_TRACE(
+					"plane_info->tiling_info.gfx8.pipe_config = %d;\n"
+					"plane_info->tiling_info.gfx8.array_mode = %d;\n"
+					"plane_info->visible = %d;\n"
+					"plane_info->per_pixel_alpha = %d;\n",
+					update->plane_info->tiling_info.gfx8.pipe_config,
+					update->plane_info->tiling_info.gfx8.array_mode,
+					update->plane_info->visible,
+					update->plane_info->per_pixel_alpha);
+
+			SURFACE_TRACE("surface->tiling_info.gfx9.swizzle = %d;\n",
+					update->plane_info->tiling_info.gfx9.swizzle);
+		}
+
+		if (update->scaling_info) {
+			SURFACE_TRACE(
+					"scaling_info->src_rect.x = %d;\n"
+					"scaling_info->src_rect.y = %d;\n"
+					"scaling_info->src_rect.width = %d;\n"
+					"scaling_info->src_rect.height = %d;\n"
+					"scaling_info->dst_rect.x = %d;\n"
+					"scaling_info->dst_rect.y = %d;\n"
+					"scaling_info->dst_rect.width = %d;\n"
+					"scaling_info->dst_rect.height = %d;\n"
+					"scaling_info->clip_rect.x = %d;\n"
+					"scaling_info->clip_rect.y = %d;\n"
+					"scaling_info->clip_rect.width = %d;\n"
+					"scaling_info->clip_rect.height = %d;\n"
+					"scaling_info->scaling_quality.h_taps = %d;\n"
+					"scaling_info->scaling_quality.v_taps = %d;\n"
+					"scaling_info->scaling_quality.h_taps_c = %d;\n"
+					"scaling_info->scaling_quality.v_taps_c = %d;\n",
+					update->scaling_info->src_rect.x,
+					update->scaling_info->src_rect.y,
+					update->scaling_info->src_rect.width,
+					update->scaling_info->src_rect.height,
+					update->scaling_info->dst_rect.x,
+					update->scaling_info->dst_rect.y,
+					update->scaling_info->dst_rect.width,
+					update->scaling_info->dst_rect.height,
+					update->scaling_info->clip_rect.x,
+					update->scaling_info->clip_rect.y,
+					update->scaling_info->clip_rect.width,
+					update->scaling_info->clip_rect.height,
+					update->scaling_info->scaling_quality.h_taps,
+					update->scaling_info->scaling_quality.v_taps,
+					update->scaling_info->scaling_quality.h_taps_c,
+					update->scaling_info->scaling_quality.v_taps_c);
+		}
+		SURFACE_TRACE("\n");
+	}
+	SURFACE_TRACE("\n");
+}
+
+void post_surface_trace(struct dc *dc)
+{
+	/* SURFACE_TRACE() picks up 'dc' and 'logger' from this scope. */
+	struct dal_logger *logger = dc->ctx->logger;
+
+	SURFACE_TRACE("post surface process.\n");
+}
+
+/*
+ * Log per-pipe CRTC timing and scan position when
+ * dc->debug.timing_trace is enabled.
+ */
+void context_timing_trace(
+		struct dc *dc,
+		struct resource_context *res_ctx)
+{
+	int i;
+	struct dc  *core_dc = dc;
+	/* TIMING_TRACE() expects 'dc' and 'logger' in scope */
+	struct dal_logger *logger =  core_dc->ctx->logger;
+	int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+	struct crtc_position position;
+	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+
+
+	/* Sample all positions first, then log — presumably so the samples
+	 * are close together in time rather than skewed by logging. */
+	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+		/* get_position() returns CRTC vertical/horizontal counter
+		 * hence not applicable for underlay pipe
+		 */
+		if (pipe_ctx->stream == NULL
+				 || pipe_ctx->pipe_idx == underlay_idx)
+			continue;
+
+		pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
+		h_pos[i] = position.horizontal_count;
+		v_pos[i] = position.vertical_count;
+	}
+	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+		if (pipe_ctx->stream == NULL)
+			continue;
+
+		/* NOTE(review): the underlay pipe is skipped in the sampling
+		 * pass above but not here, so h_pos[i]/v_pos[i] may be read
+		 * uninitialized for it — confirm. */
+		TIMING_TRACE("OTG_%d   H_tot:%d  V_tot:%d   H_pos:%d  V_pos:%d\n",
+				pipe_ctx->stream_res.tg->inst,
+				pipe_ctx->stream->timing.h_total,
+				pipe_ctx->stream->timing.v_total,
+				h_pos[i], v_pos[i]);
+	}
+}
+
+/*
+ * Log the DCN clock values of the given context when
+ * dc->debug.clock_trace is enabled.  Compiled out on pre-DCN builds.
+ */
+void context_clock_trace(
+		struct dc *dc,
+		struct dc_state *context)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	struct dc  *core_dc = dc;
+	/* CLOCK_TRACE() expects 'dc' and 'logger' in scope */
+	struct dal_logger *logger =  core_dc->ctx->logger;
+
+	/* NOTE(review): both the "Current" and "Calculated" traces below read
+	 * calc_clk, so they always print identical values; the first one
+	 * presumably should read the current clock state — confirm. */
+	CLOCK_TRACE("Current: dispclk_khz:%d  dppclk_div:%d  dcfclk_khz:%d\n"
+			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d\n"
+			"dram_ccm_us:%d  min_active_dram_ccm_us:%d\n",
+			context->bw.dcn.calc_clk.dispclk_khz,
+			context->bw.dcn.calc_clk.dppclk_div,
+			context->bw.dcn.calc_clk.dcfclk_khz,
+			context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+			context->bw.dcn.calc_clk.fclk_khz,
+			context->bw.dcn.calc_clk.dram_ccm_us,
+			context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+	CLOCK_TRACE("Calculated: dispclk_khz:%d  dppclk_div:%d  dcfclk_khz:%d\n"
+			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d\n"
+			"dram_ccm_us:%d  min_active_dram_ccm_us:%d\n",
+			context->bw.dcn.calc_clk.dispclk_khz,
+			context->bw.dcn.calc_clk.dppclk_div,
+			context->bw.dcn.calc_clk.dcfclk_khz,
+			context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+			context->bw.dcn.calc_clk.fclk_khz,
+			context->bw.dcn.calc_clk.dram_ccm_us,
+			context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+#endif
+}

+ 101 - 0
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c

@@ -0,0 +1,101 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "timing_generator.h"
+#include "hw_sequencer.h"
+
+/* used as index in array of black_color_format */
+enum black_color_format {
+	BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
+	BLACK_COLOR_FORMAT_RGB_LIMITED,
+	BLACK_COLOR_FORMAT_YUV_TV,
+	BLACK_COLOR_FORMAT_YUV_CV,
+	BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+	BLACK_COLOR_FORMAT_DEBUG,
+};
+
+/*
+ * Black level per format.  Entries must stay in the same order as
+ * enum black_color_format above, which indexes this table.
+ */
+static const struct tg_color black_color_format[] = {
+	/* BlackColorFormat_RGB_FullRange */
+	{0, 0, 0},
+	/* BlackColorFormat_RGB_Limited */
+	{0x40, 0x40, 0x40},
+	/* BlackColorFormat_YUV_TV */
+	{0x200, 0x40, 0x200},
+	/* BlackColorFormat_YUV_CV */
+	{0x1f4, 0x40, 0x1f4},
+	/* BlackColorFormat_YUV_SuperAA */
+	{0x1a2, 0x20, 0x1a2},
+	/* visual confirm debug */
+	{0xff, 0xff, 0},
+};
+
+/*
+ * Look up the black color value matching the given color space and
+ * store it in @black_color.
+ */
+void color_space_to_black_color(
+	const struct dc *dc,
+	enum dc_color_space colorspace,
+	struct tg_color *black_color)
+{
+	switch (colorspace) {
+	case COLOR_SPACE_YCBCR601:
+	case COLOR_SPACE_YCBCR709:
+	case COLOR_SPACE_YCBCR601_LIMITED:
+	case COLOR_SPACE_YCBCR709_LIMITED:
+		*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+		break;
+
+	case COLOR_SPACE_SRGB_LIMITED:
+		*black_color =
+			black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
+		break;
+
+	default:
+		/* default is sRGB black (full range). */
+		*black_color =
+			black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
+		break;
+	}
+}
+
+bool hwss_wait_for_blank_complete(
+		struct timing_generator *tg)
+{
+	int polls = 0;
+
+	/* Poll the timing generator (up to 100 checks, sleeping 1 ms
+	 * between them) until the CRTC reports the blanked state. */
+	while (polls < 100 && !tg->funcs->is_blanked(tg)) {
+		msleep(1);
+		polls++;
+	}
+
+	if (polls == 100) {
+		dm_error("DC: failed to blank crtc!\n");
+		return false;
+	}
+
+	return true;
+}

+ 2367 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link.c

@@ -0,0 +1,2367 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "atom.h"
+#include "dm_helpers.h"
+#include "dc.h"
+#include "grph_object_id.h"
+#include "gpio_service_interface.h"
+#include "core_status.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#include "link_encoder.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "abm.h"
+#include "fixed31_32.h"
+#include "dpcd_defs.h"
+#include "dmcu.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_enum.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* Log a hotplug event; relies on a local 'dc_ctx' (struct dc_context *)
+ * being in scope at the call site. */
+#define LINK_INFO(...) \
+	dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
+		__VA_ARGS__)
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+enum {
+	/* DP link-rate reference clock frequency, per the constant's name */
+	LINK_RATE_REF_FREQ_IN_MHZ = 27,
+	PEAK_FACTOR_X1000 = 1006
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+/*
+ * Release everything a dc_link owns: the DDC service, the link encoder,
+ * the local sink reference and all remote (MST) sink references.
+ * Does not free the link structure itself; see link_destroy().
+ */
+static void destruct(struct dc_link *link)
+{
+	int i;
+
+	if (link->ddc)
+		dal_ddc_service_destroy(&link->ddc);
+
+	if (link->link_enc)
+		link->link_enc->funcs->destroy(&link->link_enc);
+
+	if (link->local_sink)
+		dc_sink_release(link->local_sink);
+
+	for (i = 0; i < link->sink_count; ++i)
+		dc_sink_release(link->remote_sinks[i]);
+}
+
+/*
+ * Look up the HPD pin for a connector in the VBIOS and create an IRQ gpio
+ * object for it.  Returns NULL when the BIOS has no HPD record for this
+ * connector or the pin info cannot be resolved; caller owns the returned
+ * gpio and must release it with dal_gpio_destroy_irq().
+ */
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+		struct graphics_object_id link_id,
+		struct gpio_service *gpio_service)
+{
+	enum bp_result bp_result;
+	struct graphics_object_hpd_info hpd_info;
+	struct gpio_pin_info pin_info;
+
+	if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
+		return NULL;
+
+	bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+		hpd_info.hpd_int_gpio_uid, &pin_info);
+
+	if (bp_result != BP_RESULT_OK) {
+		/* a missing record is the only expected failure here */
+		ASSERT(bp_result == BP_RESULT_NORECORD);
+		return NULL;
+	}
+
+	return dal_gpio_service_create_irq(
+		gpio_service,
+		pin_info.offset,
+		pin_info.mask);
+}
+
+/*
+ *  Function: program_hpd_filter
+ *
+ *  @brief
+ *     Programs the HPD de-bounce filter on the link's associated HPD line.
+ *     The connect/disconnect delays are chosen internally from the
+ *     connector signal type; they are not parameters of this function.
+ *
+ *  @param [in] link: link whose HPD line is programmed
+ *
+ *  @return
+ *     true on success; false when this signal type takes no filter, no HPD
+ *     gpio is available, or the gpio cannot be opened
+ */
+static bool program_hpd_filter(
+	const struct dc_link *link)
+{
+	bool result = false;
+
+	struct gpio *hpd;
+
+	int delay_on_connect_in_ms = 0;
+	int delay_on_disconnect_in_ms = 0;
+
+	/* Verify feature is supported */
+	switch (link->connector_signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* Program hpd filter */
+		delay_on_connect_in_ms = 500;
+		delay_on_disconnect_in_ms = 100;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* Program hpd filter to allow DP signal to settle */
+		/* 500:	not able to detect MST <-> SST switch as HPD is low for
+		 * 	only 100ms on DELL U2413
+		 * 0:	some passive dongle still show aux mode instead of i2c
+		 * 20-50:not enough to hide bouncing HPD with passive dongle.
+		 * 	also see intermittent i2c read issues.
+		 */
+		delay_on_connect_in_ms = 80;
+		delay_on_disconnect_in_ms = 0;
+		break;
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_EDP:
+	default:
+		/* Don't program hpd filter */
+		return false;
+	}
+
+	/* Obtain HPD handle */
+	hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+	if (!hpd)
+		return result;
+
+	/* Setup HPD filtering */
+	if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+		struct gpio_hpd_config config;
+
+		config.delay_on_connect = delay_on_connect_in_ms;
+		config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+		dal_irq_setup_hpd_filter(hpd, &config);
+
+		dal_gpio_close(hpd);
+
+		result = true;
+	} else {
+		ASSERT_CRITICAL(false);
+	}
+
+	/* Release HPD handle */
+	dal_gpio_destroy_irq(&hpd);
+
+	return result;
+}
+
+/*
+ * Sample the HPD line for this link.  On success, *type is set to
+ * dc_connection_single when HPD is asserted or dc_connection_none
+ * otherwise, and true is returned.  Returns false if the HPD gpio
+ * cannot be obtained.
+ */
+static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+{
+	struct gpio *pin;
+	uint32_t hpd_state = 0;
+
+	/* todo: may need to lock gpio access */
+	pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+			link->ctx->gpio_service);
+	if (!pin)
+		return false;
+
+	dal_gpio_open(pin, GPIO_MODE_INTERRUPT);
+	dal_gpio_get_value(pin, &hpd_state);
+	dal_gpio_close(pin);
+	dal_gpio_destroy_irq(&pin);
+
+	/* TODO: need to do the actual detection */
+	*type = hpd_state ? dc_connection_single : dc_connection_none;
+
+	return true;
+}
+
+/*
+ * Map a sink signal type to the DDC transaction type used to reach it:
+ * plain I2C for analog/DVI/HDMI/LVDS, I2C-over-AUX for DP/eDP/MST,
+ * and NONE for anything unrecognized.
+ */
+static enum ddc_transaction_type get_ddc_transaction_type(
+		enum signal_type sink_signal)
+{
+	switch (sink_signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_RGB:
+		return DDC_TRANSACTION_TYPE_I2C;
+
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		return DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* MST does not use I2COverAux, but there is the
+		 * SPECIAL use case for "immediate dwnstrm device
+		 * access" (EPR#370830). */
+		return DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+
+	default:
+		return DDC_TRANSACTION_TYPE_NONE;
+	}
+}
+
+/*
+ * Derive the default signal type purely from the encoder/connector object
+ * IDs, with no live detection: DVI-I connectors driven by a DAC yield RGB,
+ * external NUTMEG/TRAVIS encoders report DisplayPort, etc.
+ * Returns SIGNAL_TYPE_NONE for unrecognized combinations.
+ */
+static enum signal_type get_basic_signal_type(
+	struct graphics_object_id encoder,
+	struct graphics_object_id downstream)
+{
+	if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+		switch (downstream.id) {
+		case CONNECTOR_ID_SINGLE_LINK_DVII:
+			/* DVI-I: analog when fed by a DAC, digital otherwise */
+			switch (encoder.id) {
+			case ENCODER_ID_INTERNAL_DAC1:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+			case ENCODER_ID_INTERNAL_DAC2:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+				return SIGNAL_TYPE_RGB;
+			default:
+				return SIGNAL_TYPE_DVI_SINGLE_LINK;
+			}
+		break;
+		case CONNECTOR_ID_DUAL_LINK_DVII:
+		{
+			switch (encoder.id) {
+			case ENCODER_ID_INTERNAL_DAC1:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+			case ENCODER_ID_INTERNAL_DAC2:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+				return SIGNAL_TYPE_RGB;
+			default:
+				return SIGNAL_TYPE_DVI_DUAL_LINK;
+			}
+		}
+		break;
+		case CONNECTOR_ID_SINGLE_LINK_DVID:
+			return SIGNAL_TYPE_DVI_SINGLE_LINK;
+		case CONNECTOR_ID_DUAL_LINK_DVID:
+			return SIGNAL_TYPE_DVI_DUAL_LINK;
+		case CONNECTOR_ID_VGA:
+			return SIGNAL_TYPE_RGB;
+		case CONNECTOR_ID_HDMI_TYPE_A:
+			return SIGNAL_TYPE_HDMI_TYPE_A;
+		case CONNECTOR_ID_LVDS:
+			return SIGNAL_TYPE_LVDS;
+		case CONNECTOR_ID_DISPLAY_PORT:
+			return SIGNAL_TYPE_DISPLAY_PORT;
+		case CONNECTOR_ID_EDP:
+			return SIGNAL_TYPE_EDP;
+		default:
+			return SIGNAL_TYPE_NONE;
+		}
+	} else if (downstream.type == OBJECT_TYPE_ENCODER) {
+		switch (downstream.id) {
+		case ENCODER_ID_EXTERNAL_NUTMEG:
+		case ENCODER_ID_EXTERNAL_TRAVIS:
+			return SIGNAL_TYPE_DISPLAY_PORT;
+		default:
+			return SIGNAL_TYPE_NONE;
+		}
+	}
+
+	return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Check whether there is a dongle on DP connector
+ *
+ * A DP sink is reported present when the DDC clock pin reads low.  If the
+ * DDC pin cannot be obtained or opened, the connector-type default is
+ * returned instead (true for DP/eDP connectors, false otherwise).
+ */
+static bool is_dp_sink_present(struct dc_link *link)
+{
+	enum gpio_result gpio_result;
+	uint32_t clock_pin = 0;
+
+	struct ddc *ddc;
+
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(link->link_id);
+
+	/* fallback answer used on any DDC access failure below */
+	bool present =
+		((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+		(connector_id == CONNECTOR_ID_EDP));
+
+	ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+	if (!ddc) {
+		BREAK_TO_DEBUGGER();
+		return present;
+	}
+
+	/* Open GPIO and set it to I2C mode */
+	/* Note: this GpioMode_Input will be converted
+	 * to GpioConfigType_I2cAuxDualMode in GPIO component,
+	 * which indicates we need additional delay */
+
+	if (GPIO_RESULT_OK != dal_ddc_open(
+		ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+		dal_gpio_destroy_ddc(&ddc);
+
+		return present;
+	}
+
+	/* Read GPIO: DP sink is present if both clock and data pins are zero */
+	/* [anaumov] in DAL2, there was no check for GPIO failure */
+
+	gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+	ASSERT(gpio_result == GPIO_RESULT_OK);
+
+	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+	dal_ddc_close(ddc);
+
+	return present;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ *
+ * Derives the signal type from encoder/connector object IDs, then corrects
+ * it for cases the static mapping cannot decide:
+ *  - HDMI connector without native HDMI audio support -> treat as DVI
+ *  - DP connector with no DP sink detected (passive dongle) -> treat as
+ *    DVI; may later be corrected to HDMI by dongle detection
+ */
+static enum signal_type link_detect_sink(
+	struct dc_link *link,
+	enum dc_detect_reason reason)
+{
+	enum signal_type result = get_basic_signal_type(
+		link->link_enc->id, link->link_id);
+
+	/* Internal digital encoder will detect only dongles
+	 * that require digital signal */
+
+	/* Detection mechanism is different
+	 * for different native connectors.
+	 * LVDS connector supports only LVDS signal;
+	 * PCIE is a bus slot, the actual connector needs to be detected first;
+	 * eDP connector supports only eDP signal;
+	 * HDMI should check straps for audio */
+
+	/* PCIE detects the actual connector on add-on board */
+
+	if (link->link_id.id == CONNECTOR_ID_PCIE) {
+		/* ZAZTODO implement PCIE add-on card detection */
+	}
+
+	switch (link->link_id.id) {
+	case CONNECTOR_ID_HDMI_TYPE_A:
+		/* check audio support:
+		 * if native HDMI is not supported, switch to DVI */
+		if (!link->dc->res_pool->audio_support.hdmi_audio_native)
+			result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case CONNECTOR_ID_DISPLAY_PORT: {
+		/* DP HPD short pulse. Passive DP dongle will not
+		 * have short pulse
+		 */
+		if (reason != DETECT_REASON_HPDRX) {
+			/* Check whether DP signal detected: if not -
+			 * we assume signal is DVI; it could be corrected
+			 * to HDMI after dongle detection
+			 */
+			if (!is_dp_sink_present(link))
+				result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		}
+	}
+	break;
+	default:
+	break;
+	}
+
+	return result;
+}
+
+/*
+ * Translate a detected passive-dongle type, combined with the ASIC's HDMI
+ * audio capabilities, into the signal type to drive through the dongle.
+ */
+static enum signal_type decide_signal_from_strap_and_dongle_type(
+		enum display_dongle_type dongle_type,
+		struct audio_support *audio_support)
+{
+	switch (dongle_type) {
+	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+		/* drive HDMI only when audio over the dongle is supported */
+		return audio_support->hdmi_audio_on_dongle ?
+				SIGNAL_TYPE_HDMI_TYPE_A :
+				SIGNAL_TYPE_DVI_SINGLE_LINK;
+	case DISPLAY_DONGLE_DP_DVI_DONGLE:
+		return SIGNAL_TYPE_DVI_SINGLE_LINK;
+	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+		/* mismatched dongle: fall back on native HDMI audio strap */
+		return audio_support->hdmi_audio_native ?
+				SIGNAL_TYPE_HDMI_TYPE_A :
+				SIGNAL_TYPE_DVI_SINGLE_LINK;
+	default:
+		return SIGNAL_TYPE_NONE;
+	}
+}
+
+/*
+ * Query the DP dual-mode (passive) adaptor over I2C — filling sink_cap in
+ * the process — and convert the reported dongle type into a signal type.
+ */
+static enum signal_type dp_passive_dongle_detection(
+		struct ddc_service *ddc,
+		struct display_sink_capability *sink_cap,
+		struct audio_support *audio_support)
+{
+	dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+						ddc, sink_cap);
+	return decide_signal_from_strap_and_dongle_type(
+			sink_cap->dongle_type,
+			audio_support);
+}
+
+/* Drop the link's local sink reference (if any) and reset its DPCD sink
+ * count to zero. */
+static void link_disconnect_sink(struct dc_link *link)
+{
+	struct dc_sink *sink = link->local_sink;
+
+	if (sink != NULL) {
+		dc_sink_release(sink);
+		link->local_sink = NULL;
+	}
+
+	link->dpcd_sink_count = 0;
+}
+
+/*
+ * DP-connector detection: classify the attached device as SST, MST branch,
+ * active dongle or passive dongle, and fill sink_caps accordingly.
+ * Side effects: may clear the local sink (active dongle whose downstream
+ * sink count dropped to zero) or start the MST topology manager.
+ * *converter_disable_audio is set when an active dongle cannot pass audio.
+ */
+static void detect_dp(
+	struct dc_link *link,
+	struct display_sink_capability *sink_caps,
+	bool *converter_disable_audio,
+	struct audio_support *audio_support,
+	enum dc_detect_reason reason)
+{
+	bool boot = false;
+	sink_caps->signal = link_detect_sink(link, reason);
+	sink_caps->transaction_type =
+		get_ddc_transaction_type(sink_caps->signal);
+
+	if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+		detect_dp_sink_caps(link);
+
+		/* DP active dongles */
+		if (is_dp_active_dongle(link)) {
+			link->type = dc_connection_active_dongle;
+			if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+				/*
+				 * active dongle unplug processing for short irq
+				 */
+				link_disconnect_sink(link);
+				return;
+			}
+
+			/* only a DP->HDMI converter can carry audio through */
+			if (link->dpcd_caps.dongle_type !=
+			DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+				*converter_disable_audio = true;
+			}
+		}
+		if (is_mst_supported(link)) {
+			sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+			link->type = dc_connection_mst_branch;
+
+			/*
+			 * This call will initiate MST topology discovery. Which
+			 * will detect MST ports and add new DRM connector DRM
+			 * framework. Then read EDID via remote i2c over aux. In
+			 * the end, will notify DRM detect result and save EDID
+			 * into DRM framework.
+			 *
+			 * .detect is called by .fill_modes.
+			 * .fill_modes is called by user mode ioctl
+			 * DRM_IOCTL_MODE_GETCONNECTOR.
+			 *
+			 * .get_modes is called by .fill_modes.
+			 *
+			 * call .get_modes, AMDGPU DM implementation will create
+			 * new dc_sink and add to dc_link. For long HPD plug
+			 * in/out, MST has its own handle.
+			 *
+			 * Therefore, just after dc_create, link->sink is not
+			 * created for MST until user mode app calls
+			 * DRM_IOCTL_MODE_GETCONNECTOR.
+			 *
+			 * Need check ->sink usages in case ->sink = NULL
+			 * TODO: s3 resume check
+			 */
+			if (reason == DETECT_REASON_BOOT)
+				boot = true;
+
+			if (!dm_helpers_dp_mst_start_top_mgr(
+				link->ctx,
+				link, boot)) {
+				/* MST not supported */
+				link->type = dc_connection_single;
+				sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+			}
+		}
+	} else {
+		/* DP passive dongles */
+		sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+				sink_caps,
+				audio_support);
+	}
+}
+
+/*
+ * dc_link_detect() - detect what is attached to a link.
+ *
+ * Samples HPD, classifies the connection (single sink, MST branch, active
+ * dongle, or none), (re)creates the local dc_sink, reads its EDID and logs
+ * the parsed capabilities.
+ *
+ * Returns true when link state was updated (both connect and disconnect
+ * directions).  Returns false for virtual links, on HPD/connector errors,
+ * on EDID read failure, and for MST branches (whose sinks are created via
+ * the MST topology manager instead).
+ */
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+	struct dc_sink_init_data sink_init_data = { 0 };
+	struct display_sink_capability sink_caps = { 0 };
+	uint8_t i;
+	bool converter_disable_audio = false;
+	struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+	enum dc_edid_status edid_status;
+	struct dc_context *dc_ctx = link->ctx;
+	struct dc_sink *sink = NULL;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+
+	if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+		return false;
+
+	if (false == detect_sink(link, &new_connection_type)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	/* eDP keeps its existing sink; nothing to re-detect */
+	if (link->connector_signal == SIGNAL_TYPE_EDP &&
+			link->local_sink)
+		return true;
+
+	link_disconnect_sink(link);
+
+	if (new_connection_type != dc_connection_none) {
+		link->type = new_connection_type;
+
+		/* From Disconnected-to-Connected. */
+		switch (link->connector_signal) {
+		case SIGNAL_TYPE_HDMI_TYPE_A: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			if (aud_support->hdmi_audio_native)
+				sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+			else
+				sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_DVI_DUAL_LINK: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_EDP: {
+			detect_edp_sink_caps(link);
+			sink_caps.transaction_type =
+				DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+			sink_caps.signal = SIGNAL_TYPE_EDP;
+			break;
+		}
+
+		case SIGNAL_TYPE_DISPLAY_PORT: {
+			detect_dp(
+				link,
+				&sink_caps,
+				&converter_disable_audio,
+				aud_support, reason);
+
+			/* Active dongle downstream unplug */
+			if (link->type == dc_connection_active_dongle
+					&& link->dpcd_caps.sink_count.
+					bits.SINK_COUNT == 0)
+				return true;
+
+			if (link->type == dc_connection_mst_branch) {
+				LINK_INFO("link=%d, mst branch is now Connected\n",
+					link->link_index);
+				/* Need to setup mst link_cap struct here
+				 * otherwise dc_link_detect() will leave mst link_cap
+				 * empty which leads to allocate_mst_payload() has "0"
+				 * pbn_per_slot value leading to exception on dal_fixed31_32_div()
+				 */
+				link->verified_link_cap = link->reported_link_cap;
+				return false;
+			}
+
+			break;
+		}
+
+		default:
+			DC_ERROR("Invalid connector type! signal:%d\n",
+				link->connector_signal);
+			return false;
+		} /* switch() */
+
+		if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+			link->dpcd_sink_count = link->dpcd_caps.sink_count.
+					bits.SINK_COUNT;
+		else
+			link->dpcd_sink_count = 1;
+
+		dal_ddc_service_set_transaction_type(
+						link->ddc,
+						sink_caps.transaction_type);
+
+		link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
+				link->ddc);
+
+		sink_init_data.link = link;
+		sink_init_data.sink_signal = sink_caps.signal;
+
+		sink = dc_sink_create(&sink_init_data);
+		if (!sink) {
+			DC_ERROR("Failed to create sink!\n");
+			return false;
+		}
+
+		sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+		sink->converter_disable_audio = converter_disable_audio;
+
+		link->local_sink = sink;
+
+		edid_status = dm_helpers_read_local_edid(
+				link->ctx,
+				link,
+				sink);
+
+		switch (edid_status) {
+		case EDID_BAD_CHECKSUM:
+			/* bad checksum is logged but detection continues */
+			dm_logger_write(link->ctx->logger, LOG_ERROR,
+				"EDID checksum invalid.\n");
+			break;
+		case EDID_NO_RESPONSE:
+			dm_logger_write(link->ctx->logger, LOG_ERROR,
+				"No EDID read.\n");
+			return false;
+
+		default:
+			break;
+		}
+
+		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			sink_caps.transaction_type ==
+			DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+			/*
+			 * TODO debug why Dell 2413 doesn't like
+			 *  two link trainings
+			 */
+
+			/* deal with non-mst cases */
+			dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+		}
+
+		/* HDMI-DVI Dongle */
+		if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+				!sink->edid_caps.edid_hdmi)
+			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+		/* Connectivity log: detection */
+		for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+			CONN_DATA_DETECT(link,
+					&sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
+					EDID_BLOCK_SIZE,
+					"%s: [Block %d] ", sink->edid_caps.display_name, i);
+		}
+
+		dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+			"%s: "
+			"manufacturer_id = %X, "
+			"product_id = %X, "
+			"serial_number = %X, "
+			"manufacture_week = %d, "
+			"manufacture_year = %d, "
+			"display_name = %s, "
+			"speaker_flag = %d, "
+			"audio_mode_count = %d\n",
+			__func__,
+			sink->edid_caps.manufacturer_id,
+			sink->edid_caps.product_id,
+			sink->edid_caps.serial_number,
+			sink->edid_caps.manufacture_week,
+			sink->edid_caps.manufacture_year,
+			sink->edid_caps.display_name,
+			sink->edid_caps.speaker_flags,
+			sink->edid_caps.audio_mode_count);
+
+		for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+			dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+				"%s: mode number = %d, "
+				"format_code = %d, "
+				"channel_count = %d, "
+				"sample_rate = %d, "
+				"sample_size = %d\n",
+				__func__,
+				i,
+				sink->edid_caps.audio_modes[i].format_code,
+				sink->edid_caps.audio_modes[i].channel_count,
+				sink->edid_caps.audio_modes[i].sample_rate,
+				sink->edid_caps.audio_modes[i].sample_size);
+		}
+
+	} else {
+		/* From Connected-to-Disconnected. */
+		if (link->type == dc_connection_mst_branch) {
+			LINK_INFO("link=%d, mst branch is now Disconnected\n",
+				link->link_index);
+
+			dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+
+			link->mst_stream_alloc_table.stream_count = 0;
+			memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+		}
+
+		link->type = dc_connection_none;
+		sink_caps.signal = SIGNAL_TYPE_NONE;
+	}
+
+	LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+		link->link_index, sink,
+		(sink_caps.signal == SIGNAL_TYPE_NONE ?
+			"Disconnected":"Connected"));
+
+	return true;
+}
+
+/*
+ * Map the link's HPD gpio to its HPD source id (HPD1..HPD6).  Returns
+ * HPD_SOURCEID_UNKNOWN when no HPD gpio exists or the irq source is
+ * unrecognized.
+ */
+static enum hpd_source_id get_hpd_line(
+		struct dc_link *link)
+{
+	struct gpio *hpd;
+	enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
+
+	hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+	if (hpd) {
+		switch (dal_irq_get_source(hpd)) {
+		case DC_IRQ_SOURCE_HPD1:
+			hpd_id = HPD_SOURCEID1;
+		break;
+		case DC_IRQ_SOURCE_HPD2:
+			hpd_id = HPD_SOURCEID2;
+		break;
+		case DC_IRQ_SOURCE_HPD3:
+			hpd_id = HPD_SOURCEID3;
+		break;
+		case DC_IRQ_SOURCE_HPD4:
+			hpd_id = HPD_SOURCEID4;
+		break;
+		case DC_IRQ_SOURCE_HPD5:
+			hpd_id = HPD_SOURCEID5;
+		break;
+		case DC_IRQ_SOURCE_HPD6:
+			hpd_id = HPD_SOURCEID6;
+		break;
+		default:
+			BREAK_TO_DEBUGGER();
+		break;
+		}
+
+		dal_gpio_destroy_irq(&hpd);
+	}
+
+	return hpd_id;
+}
+
+/*
+ * Map the link's DDC pin to its channel id (DDC1..DDC6, DDC_VGA, I2C_PAD).
+ * Returns CHANNEL_ID_UNKNOWN when no DDC pin exists or the line is
+ * unrecognized.
+ */
+static enum channel_id get_ddc_line(struct dc_link *link)
+{
+	struct ddc *ddc;
+	enum channel_id channel = CHANNEL_ID_UNKNOWN;
+
+	ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+	if (ddc) {
+		switch (dal_ddc_get_line(ddc)) {
+		case GPIO_DDC_LINE_DDC1:
+			channel = CHANNEL_ID_DDC1;
+			break;
+		case GPIO_DDC_LINE_DDC2:
+			channel = CHANNEL_ID_DDC2;
+			break;
+		case GPIO_DDC_LINE_DDC3:
+			channel = CHANNEL_ID_DDC3;
+			break;
+		case GPIO_DDC_LINE_DDC4:
+			channel = CHANNEL_ID_DDC4;
+			break;
+		case GPIO_DDC_LINE_DDC5:
+			channel = CHANNEL_ID_DDC5;
+			break;
+		case GPIO_DDC_LINE_DDC6:
+			channel = CHANNEL_ID_DDC6;
+			break;
+		case GPIO_DDC_LINE_DDC_VGA:
+			channel = CHANNEL_ID_DDC_VGA;
+			break;
+		case GPIO_DDC_LINE_I2C_PAD:
+			channel = CHANNEL_ID_I2C_PAD;
+			break;
+		default:
+			BREAK_TO_DEBUGGER();
+			break;
+		}
+	}
+
+	return channel;
+}
+
+/*
+ * Map an encoder object id (plus its enum instance) to the PHY transmitter
+ * it drives.  Returns TRANSMITTER_UNKNOWN for unrecognized combinations.
+ */
+static enum transmitter translate_encoder_to_transmitter(
+	struct graphics_object_id encoder)
+{
+	switch (encoder.id) {
+	case ENCODER_ID_INTERNAL_UNIPHY:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_A;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_B;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY1:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_C;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_D;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY2:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_E;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_F;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY3:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_G;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_NUTMEG_CRT;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_TRAVIS_CRT;
+		case ENUM_ID_2:
+			return TRANSMITTER_TRAVIS_LCD;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	default:
+		return TRANSMITTER_UNKNOWN;
+	}
+}
+
+/*
+ * One-time construction of a dc_link from VBIOS data: resolves the
+ * connector object id, connector signal, HPD/DDC lines and irq sources,
+ * creates the DDC service and link encoder, selects the matching device
+ * tag and DDI channel mapping, and programs the HPD filter.
+ * Returns false (after tearing down whatever was created) on any failure.
+ */
+static bool construct(
+	struct dc_link *link,
+	const struct link_init_data *init_params)
+{
+	uint8_t i;
+	struct gpio *hpd_gpio = NULL;
+	struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+	struct dc_context *dc_ctx = init_params->ctx;
+	struct encoder_init_data enc_init_data = { 0 };
+	struct integrated_info info = {{{ 0 }}};
+	struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+	const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
+	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+
+	link->link_status.dpcd_caps = &link->dpcd_caps;
+
+	link->dc = init_params->dc;
+	link->ctx = dc_ctx;
+	link->link_index = init_params->link_index;
+
+	link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+	if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+		dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+				__func__, init_params->connector_index);
+		goto create_fail;
+	}
+
+	hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+	if (hpd_gpio != NULL)
+		link->irq_source_hpd = dal_irq_get_source(hpd_gpio);
+
+	switch (link->link_id.id) {
+	case CONNECTOR_ID_HDMI_TYPE_A:
+		link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+		break;
+	case CONNECTOR_ID_SINGLE_LINK_DVID:
+	case CONNECTOR_ID_SINGLE_LINK_DVII:
+		link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case CONNECTOR_ID_DUAL_LINK_DVID:
+	case CONNECTOR_ID_DUAL_LINK_DVII:
+		link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		break;
+	case CONNECTOR_ID_DISPLAY_PORT:
+		link->connector_signal =	SIGNAL_TYPE_DISPLAY_PORT;
+
+		if (hpd_gpio != NULL)
+			link->irq_source_hpd_rx =
+					dal_irq_get_rx_source(hpd_gpio);
+
+		break;
+	case CONNECTOR_ID_EDP:
+		link->connector_signal = SIGNAL_TYPE_EDP;
+
+		/* eDP uses only the RX (short-pulse) irq source */
+		if (hpd_gpio != NULL) {
+			link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+			link->irq_source_hpd_rx =
+					dal_irq_get_rx_source(hpd_gpio);
+		}
+		break;
+	default:
+		dm_logger_write(dc_ctx->logger, LOG_WARNING,
+			"Unsupported Connector type:%d!\n", link->link_id.id);
+		goto create_fail;
+	}
+
+	/* the gpio was only needed to resolve irq sources above */
+	if (hpd_gpio != NULL) {
+		dal_gpio_destroy_irq(&hpd_gpio);
+		hpd_gpio = NULL;
+	}
+
+	/* TODO: #DAL3 Implement id to str function.*/
+	LINK_INFO("Connector[%d] description:"
+			"signal %d\n",
+			init_params->connector_index,
+			link->connector_signal);
+
+	ddc_service_init_data.ctx = link->ctx;
+	ddc_service_init_data.id = link->link_id;
+	ddc_service_init_data.link = link;
+	link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+
+	if (link->ddc == NULL) {
+		DC_ERROR("Failed to create ddc_service!\n");
+		goto ddc_create_fail;
+	}
+
+	link->ddc_hw_inst =
+		dal_ddc_get_line(
+			dal_ddc_service_get_ddc_pin(link->ddc));
+
+	enc_init_data.ctx = dc_ctx;
+	bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+	enc_init_data.connector = link->link_id;
+	enc_init_data.channel = get_ddc_line(link);
+	enc_init_data.hpd_source = get_hpd_line(link);
+
+	link->hpd_src = enc_init_data.hpd_source;
+
+	enc_init_data.transmitter =
+			translate_encoder_to_transmitter(enc_init_data.encoder);
+	link->link_enc = link->dc->res_pool->funcs->link_enc_create(
+								&enc_init_data);
+
+	if( link->link_enc == NULL) {
+		DC_ERROR("Failed to create link encoder!\n");
+		goto link_enc_create_fail;
+	}
+
+	link->link_enc_hw_inst = link->link_enc->transmitter;
+
+	/* probe up to 4 device tags for this connector */
+	for (i = 0; i < 4; i++) {
+		if (BP_RESULT_OK !=
+				bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+			DC_ERROR("Failed to find device tag!\n");
+			goto device_tag_fail;
+		}
+
+		/* Look for device tag that matches connector signal,
+		 * CRT for rgb, LCD for other supported signal types
+		 */
+		if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
+			&& link->connector_signal != SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
+			&& link->connector_signal == SIGNAL_TYPE_RGB)
+			continue;
+		break;
+	}
+
+	if (bios->integrated_info)
+		info = *bios->integrated_info;
+
+	/* Look for channel mapping corresponding to connector and device tag */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+		struct external_display_path *path =
+			&info.ext_disp_conn_info.path[i];
+		if (path->device_connector_id.enum_id == link->link_id.enum_id
+			&& path->device_connector_id.id == link->link_id.id
+			&& path->device_connector_id.type == link->link_id.type) {
+
+			/* prefer an ACPI-device match over a raw tag match */
+			if (link->device_tag.acpi_device != 0
+				&& path->device_acpi_enum == link->device_tag.acpi_device) {
+				link->ddi_channel_mapping = path->channel_mapping;
+				link->chip_caps = path->caps;
+			} else if (path->device_tag ==
+					link->device_tag.dev_id.raw_device_tag) {
+				link->ddi_channel_mapping = path->channel_mapping;
+				link->chip_caps = path->caps;
+			}
+			break;
+		}
+	}
+
+	/*
+	 * TODO check if GPIO programmed correctly
+	 *
+	 * If GPIO isn't programmed correctly HPD might not rise or drain
+	 * fast enough, leading to bounces.
+	 */
+	program_hpd_filter(link);
+
+	return true;
+device_tag_fail:
+	link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+	dal_ddc_service_destroy(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+	if (hpd_gpio != NULL) {
+		dal_gpio_destroy_irq(&hpd_gpio);
+	}
+
+	return false;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+/* Allocate and fully construct a dc_link; returns NULL on allocation or
+ * construction failure.  Free with link_destroy(). */
+struct dc_link *link_create(const struct link_init_data *init_params)
+{
+	struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+
+	if (link == NULL)
+		return NULL;
+
+	if (!construct(link, init_params)) {
+		kfree(link);
+		return NULL;
+	}
+
+	return link;
+}
+
+/* Tear down and free a link created by link_create(); also nulls the
+ * caller's pointer. */
+void link_destroy(struct dc_link **link)
+{
+	destruct(*link);
+	kfree(*link);
+	*link = NULL;
+}
+
+/*
+ * Program the sink's DP_EDP_CONFIGURATION_SET panel-mode bit to match the
+ * requested panel mode, reading the register first and writing only when
+ * it differs.  DP_PANEL_MODE_DEFAULT leaves the DPCD untouched (only the
+ * state is logged).
+ */
+static void dpcd_configure_panel_mode(
+	struct dc_link *link,
+	enum dp_panel_mode panel_mode)
+{
+	union dpcd_edp_config edp_config_set;
+	bool panel_mode_edp = false;
+
+	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+	if (DP_PANEL_MODE_DEFAULT != panel_mode) {
+
+		switch (panel_mode) {
+		case DP_PANEL_MODE_EDP:
+		case DP_PANEL_MODE_SPECIAL:
+			panel_mode_edp = true;
+			break;
+
+		default:
+			break;
+		}
+
+		/*set edp panel mode in receiver*/
+		core_link_read_dpcd(
+			link,
+			DP_EDP_CONFIGURATION_SET,
+			&edp_config_set.raw,
+			sizeof(edp_config_set.raw));
+
+		/* write back only on an actual change */
+		if (edp_config_set.bits.PANEL_MODE_EDP
+			!= panel_mode_edp) {
+			enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+			edp_config_set.bits.PANEL_MODE_EDP =
+			panel_mode_edp;
+			result = core_link_write_dpcd(
+				link,
+				DP_EDP_CONFIGURATION_SET,
+				&edp_config_set.raw,
+				sizeof(edp_config_set.raw));
+
+			ASSERT(result == DDC_RESULT_SUCESSFULL);
+		}
+	}
+	dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
+			"Link: %d eDP panel mode supported: %d "
+			"eDP panel mode enabled: %d \n",
+			link->link_index,
+			link->dpcd_caps.panel_mode_edp,
+			panel_mode_edp);
+}
+
+/*
+ * Read-modify-write the sink's DP_DOWNSPREAD_CTRL register so its
+ * IGNORE_MSA_TIMING_PARAM bit reflects the stream's
+ * ignore_msa_timing_param flag.
+ */
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->sink->link;
+	union down_spread_ctrl downspread;
+
+	core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+			&downspread.raw, sizeof(downspread));
+
+	downspread.bits.IGNORE_MSA_TIMING_PARAM =
+			(stream->ignore_msa_timing_param) ? 1 : 0;
+
+	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+			&downspread.raw, sizeof(downspread));
+}
+
+/* Bring up an SST DP/eDP link for the given pipe: pick link settings for the
+ * stream's timing, raise display clock state if the top link rate is used,
+ * enable the PHY, configure the sink's panel mode, link-train (with retries),
+ * and push stream features to the sink.
+ * Returns DC_OK on success or DC_FAIL_DP_LINK_TRAINING if training failed.
+ */
+static enum dc_status enable_link_dp(
+		struct dc_state *state,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	enum dc_status status;
+	bool skip_video_pattern;
+	struct dc_link *link = stream->sink->link;
+	struct dc_link_settings link_settings = {0};
+	enum dp_panel_mode panel_mode;
+	enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
+
+	/* get link settings for video mode timing */
+	decide_link_settings(stream, &link_settings);
+
+	/* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
+	 * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+	 */
+	if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+		max_link_rate = LINK_RATE_HIGH3;
+
+	if (link_settings.link_rate == max_link_rate) {
+		/* Prefer the explicit min-clocks-state interface when present;
+		 * otherwise fall back to a direct PHY clock/voltage request.
+		 */
+		if (state->dis_clk->funcs->set_min_clocks_state) {
+			if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
+				state->dis_clk->funcs->set_min_clocks_state(
+					state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+		} else {
+			uint32_t dp_phyclk_in_khz;
+			const struct clocks_value clocks_value =
+					state->dis_clk->cur_clocks_value;
+
+			/* 27mhz = 27000000hz= 27000khz */
+			dp_phyclk_in_khz = link_settings.link_rate * 27000;
+
+			/* Only request a bump when current limits are exceeded. */
+			if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
+				(dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
+				(dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
+				state->dis_clk->funcs->apply_clock_voltage_request(
+						state->dis_clk,
+						DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+						dp_phyclk_in_khz,
+						false,
+						true);
+			}
+		}
+	}
+
+	dp_enable_link_phy(
+		link,
+		pipe_ctx->stream->signal,
+		pipe_ctx->clock_source->id,
+		&link_settings);
+
+	panel_mode = dp_get_panel_mode(link);
+	dpcd_configure_panel_mode(link, panel_mode);
+
+	/* Video pattern is skipped except at the lowest link rate. */
+	skip_video_pattern = true;
+
+	if (link_settings.link_rate == LINK_RATE_LOW)
+			skip_video_pattern = false;
+
+	if (perform_link_training_with_retries(
+			link,
+			&link_settings,
+			skip_video_pattern,
+			LINK_TRAINING_ATTEMPTS)) {
+		/* Training succeeded: record the settings now in effect. */
+		link->cur_link_settings = link_settings;
+		status = DC_OK;
+	}
+	else
+		status = DC_FAIL_DP_LINK_TRAINING;
+
+	enable_stream_features(pipe_ctx);
+
+	return status;
+}
+
+/* Enable a DP MST link. The shared PHY is enabled/trained only once for all
+ * sinks behind the branch; later streams on an already-trained link return
+ * DC_OK immediately.
+ */
+static enum dc_status enable_link_dp_mst(
+		struct dc_state *state,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc_link *link = pipe_ctx->stream->sink->link;
+
+	/* sink signal type after MST branch is MST. Multiple MST sinks
+	 * share one link. Link DP PHY is enable or training only once.
+	 */
+	if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
+		return DC_OK;
+
+	/* set the sink to MST mode before enabling the link */
+	dp_enable_mst_on_sink(link, true);
+
+	return enable_link_dp(state, pipe_ctx);
+}
+
+/* Fetch external-HDMI (retimer) I2C settings from the BIOS integrated-info
+ * table for the DIG engine driving this pipe, and validate them.
+ * Returns true when valid settings were copied into *settings.
+ *
+ * Fix: reg_num was previously assigned the 6G register count
+ * (dpN_ext_hdmi_6g_reg_num) for every engine; it must be the 3G count
+ * (dpN_ext_hdmi_reg_num), otherwise the 3G register list is mis-sized.
+ */
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+		enum engine_id eng_id,
+		struct ext_hdmi_settings *settings)
+{
+	bool result = false;
+	int i = 0;
+	struct integrated_info *integrated_info =
+			pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+	if (integrated_info == NULL)
+		return false;
+
+	/*
+	 * Get retimer settings from sbios for passing SI eye test for DCE11
+	 * The setting values are varied based on board revision and port id
+	 * Therefore the setting values of each ports is passed by sbios.
+	 */
+
+	// Check if current bios contains ext Hdmi settings
+	if (integrated_info->gpu_cap_info & 0x20) {
+		switch (eng_id) {
+		case ENGINE_ID_DIGA:
+			settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
+			settings->reg_num = integrated_info->dp0_ext_hdmi_reg_num;
+			settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
+			memmove(settings->reg_settings,
+					integrated_info->dp0_ext_hdmi_reg_settings,
+					sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
+			memmove(settings->reg_settings_6g,
+					integrated_info->dp0_ext_hdmi_6g_reg_settings,
+					sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
+			result = true;
+			break;
+		case ENGINE_ID_DIGB:
+			settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
+			settings->reg_num = integrated_info->dp1_ext_hdmi_reg_num;
+			settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
+			memmove(settings->reg_settings,
+					integrated_info->dp1_ext_hdmi_reg_settings,
+					sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
+			memmove(settings->reg_settings_6g,
+					integrated_info->dp1_ext_hdmi_6g_reg_settings,
+					sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
+			result = true;
+			break;
+		case ENGINE_ID_DIGC:
+			settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
+			settings->reg_num = integrated_info->dp2_ext_hdmi_reg_num;
+			settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
+			memmove(settings->reg_settings,
+					integrated_info->dp2_ext_hdmi_reg_settings,
+					sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
+			memmove(settings->reg_settings_6g,
+					integrated_info->dp2_ext_hdmi_6g_reg_settings,
+					sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
+			result = true;
+			break;
+		case ENGINE_ID_DIGD:
+			settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
+			settings->reg_num = integrated_info->dp3_ext_hdmi_reg_num;
+			settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
+			memmove(settings->reg_settings,
+					integrated_info->dp3_ext_hdmi_reg_settings,
+					sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
+			memmove(settings->reg_settings_6g,
+					integrated_info->dp3_ext_hdmi_6g_reg_settings,
+					sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
+			result = true;
+			break;
+		default:
+			break;
+		}
+
+		if (result == true) {
+			// Validate settings from bios integrated info table
+			if (settings->slv_addr == 0)
+				return false;
+			if (settings->reg_num > 9)
+				return false;
+			if (settings->reg_num_6g > 3)
+				return false;
+
+			/* Retimer register offsets must be within 0x00-0x20. */
+			for (i = 0; i < settings->reg_num; i++) {
+				if (settings->reg_settings[i].i2c_reg_index > 0x20)
+					return false;
+			}
+
+			for (i = 0; i < settings->reg_num_6g; i++) {
+				if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
+					return false;
+			}
+		}
+	}
+
+	return result;
+}
+
+/* Submit a single-payload I2C write of 'length' bytes from 'buffer' to the
+ * 7-bit 'address' on the stream's link. Returns true on success.
+ */
+static bool i2c_write(struct pipe_ctx *pipe_ctx,
+		uint8_t address, uint8_t *buffer, uint32_t length)
+{
+	/* The {0} initializers already zero both structs; the previous
+	 * explicit memset() calls were redundant and have been removed.
+	 */
+	struct i2c_command cmd = {0};
+	struct i2c_payload payload = {0};
+
+	cmd.number_of_payloads = 1;
+	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+	cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
+
+	payload.address = address;
+	payload.data = buffer;
+	payload.length = length;
+	payload.write = true;
+	cmd.payloads = &payload;
+
+	return dc_submit_i2c(pipe_ctx->stream->ctx->dc,
+			pipe_ctx->stream->sink->link->link_index, &cmd);
+}
+
+/* Program DP159 retimer registers over I2C using BIOS-provided settings.
+ * Applies the 3G register list always, the 6G list when the pixel clock is
+ * over 340 MHz, and extra fixed writes for 640x480 (VGA) modes.
+ *
+ * Fix: the 6G loop previously validated the register index against the 3G
+ * table (reg_settings[i]) instead of the 6G table (reg_settings_6g[i]),
+ * so 6G entries were filtered by unrelated data.
+ */
+static void write_i2c_retimer_setting(
+		struct pipe_ctx *pipe_ctx,
+		bool is_vga_mode,
+		bool is_over_340mhz,
+		struct ext_hdmi_settings *settings)
+{
+	uint8_t slave_address = (settings->slv_addr >> 1);
+	uint8_t buffer[2];
+	const uint8_t apply_rx_tx_change = 0x4;
+	uint8_t offset = 0xA;
+	uint8_t value = 0;
+	int i = 0;
+	bool i2c_success = false;
+
+	memset(&buffer, 0, sizeof(buffer));
+
+	/* Start Ext-Hdmi programming*/
+
+	for (i = 0; i < settings->reg_num; i++) {
+		/* Apply 3G settings */
+		if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+			buffer[0] = settings->reg_settings[i].i2c_reg_index;
+			buffer[1] = settings->reg_settings[i].i2c_reg_val;
+			i2c_success = i2c_write(pipe_ctx, slave_address,
+						buffer, sizeof(buffer));
+
+			if (!i2c_success)
+				/* Write failure */
+				ASSERT(i2c_success);
+
+			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+			 * needs to be set to 1 on every 0xA-0xC write.
+			 */
+			if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+				settings->reg_settings[i].i2c_reg_index == 0xB ||
+				settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+				/* Query current value from offset 0xA */
+				if (settings->reg_settings[i].i2c_reg_index == 0xA)
+					value = settings->reg_settings[i].i2c_reg_val;
+				else {
+					i2c_success =
+						dal_ddc_service_query_ddc_data(
+						pipe_ctx->stream->sink->link->ddc,
+						slave_address, &offset, 1, &value, 1);
+					if (!i2c_success)
+						/* Read failure */
+						ASSERT(i2c_success);
+				}
+
+				buffer[0] = offset;
+				/* Set APPLY_RX_TX_CHANGE bit to 1 */
+				buffer[1] = value | apply_rx_tx_change;
+				i2c_success = i2c_write(pipe_ctx, slave_address,
+						buffer, sizeof(buffer));
+				if (!i2c_success)
+					/* Write failure */
+					ASSERT(i2c_success);
+			}
+		}
+	}
+
+	/* Apply 6G settings when pixel clock exceeds 340 MHz */
+	if (is_over_340mhz) {
+		for (i = 0; i < settings->reg_num_6g; i++) {
+			/* Apply 6G settings */
+			if (settings->reg_settings_6g[i].i2c_reg_index <= 0x20) {
+
+				buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+				buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+				i2c_success = i2c_write(pipe_ctx, slave_address,
+							buffer, sizeof(buffer));
+
+				if (!i2c_success)
+					/* Write failure */
+					ASSERT(i2c_success);
+
+				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+				 * needs to be set to 1 on every 0xA-0xC write.
+				 */
+				if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
+					settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
+					settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
+
+					/* Query current value from offset 0xA */
+					if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
+						value = settings->reg_settings_6g[i].i2c_reg_val;
+					else {
+						i2c_success =
+								dal_ddc_service_query_ddc_data(
+								pipe_ctx->stream->sink->link->ddc,
+								slave_address, &offset, 1, &value, 1);
+						if (!i2c_success)
+							/* Read failure */
+							ASSERT(i2c_success);
+					}
+
+					buffer[0] = offset;
+					/* Set APPLY_RX_TX_CHANGE bit to 1 */
+					buffer[1] = value | apply_rx_tx_change;
+					i2c_success = i2c_write(pipe_ctx, slave_address,
+							buffer, sizeof(buffer));
+					if (!i2c_success)
+						/* Write failure */
+						ASSERT(i2c_success);
+				}
+			}
+		}
+	}
+
+	if (is_vga_mode) {
+		/* Program additional settings if using 640x480 resolution */
+
+		/* Write offset 0xFF to 0x01 */
+		buffer[0] = 0xff;
+		buffer[1] = 0x01;
+		i2c_success = i2c_write(pipe_ctx, slave_address,
+				buffer, sizeof(buffer));
+		if (!i2c_success)
+			/* Write failure */
+			ASSERT(i2c_success);
+
+		/* Write offset 0x00 to 0x23 */
+		buffer[0] = 0x00;
+		buffer[1] = 0x23;
+		i2c_success = i2c_write(pipe_ctx, slave_address,
+				buffer, sizeof(buffer));
+		if (!i2c_success)
+			/* Write failure */
+			ASSERT(i2c_success);
+
+		/* Write offset 0xff to 0x00 */
+		buffer[0] = 0xff;
+		buffer[1] = 0x00;
+		i2c_success = i2c_write(pipe_ctx, slave_address,
+				buffer, sizeof(buffer));
+		if (!i2c_success)
+			/* Write failure */
+			ASSERT(i2c_success);
+
+	}
+}
+
+/* Program a DP159 retimer at the default slave address (0xBA) with a fixed
+ * register sequence when the BIOS did not provide per-board settings.
+ * Register values differ for >340 MHz pixel clocks; extra writes are issued
+ * for 640x480 (VGA) modes. Each write is ASSERT-checked but not retried.
+ */
+static void write_i2c_default_retimer_setting(
+		struct pipe_ctx *pipe_ctx,
+		bool is_vga_mode,
+		bool is_over_340mhz)
+{
+	uint8_t slave_address = (0xBA >> 1);
+	uint8_t buffer[2];
+	bool i2c_success = false;
+
+	memset(&buffer, 0, sizeof(buffer));
+
+	/* Program Slave Address for tuning single integrity */
+	/* Write offset 0x0A to 0x13 */
+	buffer[0] = 0x0A;
+	buffer[1] = 0x13;
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+			buffer, sizeof(buffer));
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+
+	/* Write offset 0x0A to 0x17 */
+	buffer[0] = 0x0A;
+	buffer[1] = 0x17;
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+			buffer, sizeof(buffer));
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+
+	/* Write offset 0x0B to 0xDA or 0xD8 */
+	buffer[0] = 0x0B;
+	buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+			buffer, sizeof(buffer));
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+
+	/* Write offset 0x0A to 0x17 */
+	buffer[0] = 0x0A;
+	buffer[1] = 0x17;
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+			buffer, sizeof(buffer));
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+
+	/* Write offset 0x0C to 0x1D or 0x91 */
+	buffer[0] = 0x0C;
+	buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+			buffer, sizeof(buffer));
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+
+	/* Write offset 0x0A to 0x17 */
+	buffer[0] = 0x0A;
+	buffer[1] = 0x17;
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+			buffer, sizeof(buffer));
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+
+
+	if (is_vga_mode) {
+		/* Program additional settings if using 640x480 resolution */
+
+		/* Write offset 0xFF to 0x01 */
+		buffer[0] = 0xff;
+		buffer[1] = 0x01;
+		i2c_success = i2c_write(pipe_ctx, slave_address,
+				buffer, sizeof(buffer));
+		if (!i2c_success)
+			/* Write failure */
+			ASSERT(i2c_success);
+
+		/* Write offset 0x00 to 0x23 */
+		buffer[0] = 0x00;
+		buffer[1] = 0x23;
+		i2c_success = i2c_write(pipe_ctx, slave_address,
+				buffer, sizeof(buffer));
+		if (!i2c_success)
+			/* Write failure */
+			ASSERT(i2c_success);
+
+		/* Write offset 0xff to 0x00 */
+		buffer[0] = 0xff;
+		buffer[1] = 0x00;
+		i2c_success = i2c_write(pipe_ctx, slave_address,
+				buffer, sizeof(buffer));
+		if (!i2c_success)
+			/* Write failure */
+			ASSERT(i2c_success);
+	}
+}
+
+/* Program a PI3EQX1204 redriver (slave 0xF0) with fixed signal-integrity
+ * tuning bytes; one byte depends on whether the pixel clock is over 340 MHz.
+ */
+static void write_i2c_redriver_setting(
+		struct pipe_ctx *pipe_ctx,
+		bool is_over_340mhz)
+{
+	uint8_t slave_address = (0xF0 >> 1);
+	uint8_t buffer[16] = {0};
+	bool i2c_success;
+
+	// Program Slave Address for tuning single integrity
+	buffer[3] = 0x4E;
+	buffer[4] = 0x4E;
+	buffer[5] = 0x4E;
+	buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
+
+	i2c_success = i2c_write(pipe_ctx, slave_address,
+					buffer, sizeof(buffer));
+
+	if (!i2c_success)
+		/* Write failure */
+		ASSERT(i2c_success);
+}
+
+/* Enable a TMDS (HDMI/DVI) link: program any external retimer/redriver chip
+ * indicated by the link's chip caps, write SCDC scramble config for HDMI,
+ * reset cached DP link settings, and enable the TMDS output on the encoder.
+ */
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->sink->link;
+	enum dc_color_depth display_color_depth;
+	enum engine_id eng_id;
+	struct ext_hdmi_settings settings = {0};
+	bool is_over_340mhz = false;
+	bool is_vga_mode = (stream->timing.h_addressable == 640)
+			&& (stream->timing.v_addressable == 480);
+
+	if (stream->phy_pix_clk > 340000)
+		is_over_340mhz = true;
+
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+		unsigned short masked_chip_caps = pipe_ctx->stream->sink->link->chip_caps &
+				EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+		if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+			/* DP159, Retimer settings */
+			eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+			/* Prefer board-specific settings from BIOS; fall back
+			 * to the fixed default sequence otherwise.
+			 */
+			if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
+				write_i2c_retimer_setting(pipe_ctx,
+						is_vga_mode, is_over_340mhz, &settings);
+			} else {
+				write_i2c_default_retimer_setting(pipe_ctx,
+						is_vga_mode, is_over_340mhz);
+			}
+		} else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+			/* PI3EQX1204, Redriver settings */
+			write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
+		}
+	}
+
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+		dal_ddc_service_write_scdc_data(
+			stream->sink->link->ddc,
+			stream->phy_pix_clk,
+			stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
+	/* TMDS link: clear any stale DP link settings on this link. */
+	memset(&stream->sink->link->cur_link_settings, 0,
+			sizeof(struct dc_link_settings));
+
+	display_color_depth = stream->timing.display_color_depth;
+	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		display_color_depth = COLOR_DEPTH_888;
+
+	link->link_enc->funcs->enable_tmds_output(
+			link->link_enc,
+			pipe_ctx->clock_source->id,
+			display_color_depth,
+			pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
+			pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+			stream->phy_pix_clk);
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		dal_ddc_service_read_scdc_data(link->ddc);
+}
+
+/****************************enable_link***********************************/
+/* Dispatch link enable by signal type (DP SST/MST, TMDS, virtual), then on
+ * success bring up and un-mute audio if the pipe has an audio resource.
+ * Returns the status from the signal-specific enable path.
+ */
+static enum dc_status enable_link(
+		struct dc_state *state,
+		struct pipe_ctx *pipe_ctx)
+{
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+	switch (pipe_ctx->stream->signal) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		status = enable_link_dp(state, pipe_ctx);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		status = enable_link_dp_mst(state, pipe_ctx);
+		msleep(200);
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		enable_link_hdmi(pipe_ctx);
+		status = DC_OK;
+		break;
+	case SIGNAL_TYPE_VIRTUAL:
+		status = DC_OK;
+		break;
+	default:
+		break;
+	}
+
+	if (pipe_ctx->stream_res.audio && status == DC_OK) {
+		/* notify audio driver for audio modes of monitor */
+		pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+		/* un-mute audio */
+		/* TODO: audio should be per stream rather than per link */
+		pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+			pipe_ctx->stream_res.stream_enc, false);
+	}
+
+	return status;
+}
+
+/* Disable the physical link for the given signal type: DP SST/eDP and MST
+ * each have a dedicated PHY-disable path; everything else goes through the
+ * link encoder's generic disable_output.
+ */
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+	/*
+	 * TODO: implement call for dp_set_hw_test_pattern
+	 * it is needed for compliance testing
+	 */
+
+	/* here we need to specify that encoder output settings
+	 * need to be calculated as for the set mode,
+	 * it will lead to querying dynamic link capabilities
+	 * which should be done before enable output */
+
+	if (dc_is_dp_signal(signal)) {
+		/* SST DP, eDP */
+		if (dc_is_dp_sst_signal(signal))
+			dp_disable_link_phy(link, signal);
+		else
+			dp_disable_link_phy_mst(link, signal);
+	} else
+		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+}
+
+/* Validate that a timing can be carried by this link: check it against the
+ * dongle's maximum pixel clock, and for DP/eDP against link bandwidth.
+ * Links with an EDID-override remote sink always pass.
+ */
+enum dc_status dc_link_validate_mode_timing(
+		const struct dc_stream_state *stream,
+		struct dc_link *link,
+		const struct dc_crtc_timing *timing)
+{
+	uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+
+	/* A hack to avoid failing any modes for EDID override feature on
+	 * topology change such as lower quality cable for DP or different dongle
+	 */
+	if (link->remote_sinks[0] != NULL)
+		return DC_OK;
+
+	/* Reject timings the active dongle cannot carry. */
+	if (max_pix_clk != 0 && timing->pix_clk_khz > max_pix_clk)
+		return DC_EXCEED_DONGLE_MAX_CLK;
+
+	switch (stream->signal) {
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		/* DP additionally requires sufficient link bandwidth. */
+		if (!dp_validate_mode_timing(link, timing))
+			return DC_NO_DP_LINK_BANDWIDTH;
+		break;
+
+	default:
+		break;
+	}
+
+	return DC_OK;
+}
+
+
+/* Set the backlight level via the ABM block. For embedded panels, resolves
+ * the timing-generator instance driving 'stream' to a DMCU controller id
+ * (tg->inst + 1; 0 means "unspecified"). Returns false only when ABM or its
+ * set_backlight_level hook is unavailable.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
+		uint32_t frame_ramp, const struct dc_stream_state *stream)
+{
+	struct dc  *core_dc = link->ctx->dc;
+	struct abm *abm = core_dc->res_pool->abm;
+	unsigned int controller_id = 0;
+	int i;
+
+	if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+		return false;
+
+	dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
+			"New Backlight level: %d (0x%X)\n", level, level);
+
+	if (dc_is_embedded_signal(link->connector_signal)) {
+		if (stream != NULL) {
+			for (i = 0; i < MAX_PIPES; i++) {
+				if (core_dc->current_state->res_ctx.
+						pipe_ctx[i].stream
+						== stream)
+					/* DMCU -1 for all controller id values,
+					 * therefore +1 here
+					 */
+					controller_id =
+						core_dc->current_state->
+						res_ctx.pipe_ctx[i].stream_res.tg->inst +
+						1;
+			}
+		}
+		abm->funcs->set_backlight_level(
+				abm,
+				level,
+				frame_ramp,
+				controller_id);
+	}
+
+	return true;
+}
+
+/* Enable or disable PSR through the DMCU. A no-op (still returning true)
+ * when there is no DMCU or PSR was never set up on this link.
+ */
+bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+{
+	struct dmcu *dmcu = link->ctx->dc->res_pool->dmcu;
+
+	if (dmcu != NULL && link->psr_enabled)
+		dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+	return true;
+}
+
+/* Query current PSR state from the DMCU into *psr_state. Leaves *psr_state
+ * untouched (and still returns true) when no DMCU exists or PSR is off.
+ */
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+	struct dmcu *dmcu = link->ctx->dc->res_pool->dmcu;
+
+	if (dmcu != NULL && link->psr_enabled)
+		dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+	return true;
+}
+
+/* Configure PSR on an eDP link: write the sink's PSR DPCD configuration,
+ * fill in the PSR context (controller/transmitter/engine ids, timing-derived
+ * parameters, policy flags) and hand it to the DMCU.
+ * Returns true when PSR was set up, false when link/DMCU are unavailable.
+ *
+ * Fix: 'link' is now NULL-checked before it is dereferenced; previously
+ * link->ctx->dc was read before the NULL test.
+ */
+bool dc_link_setup_psr(struct dc_link *link,
+		const struct dc_stream_state *stream, struct psr_config *psr_config,
+		struct psr_context *psr_context)
+{
+	struct dc  *core_dc;
+	struct dmcu *dmcu;
+	int i;
+
+	psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+	if (link == NULL)
+		return false;
+
+	core_dc = link->ctx->dc;
+	dmcu = core_dc->res_pool->dmcu;
+
+	if (dmcu != NULL) {
+		/* updateSinkPsrDpcdConfig*/
+		union dpcd_psr_configuration psr_configuration;
+
+		memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+		psr_configuration.bits.ENABLE                    = 1;
+		psr_configuration.bits.CRC_VERIFICATION          = 1;
+		psr_configuration.bits.FRAME_CAPTURE_INDICATION  =
+				psr_config->psr_frame_capture_indication_req;
+
+		/* Check for PSR v2*/
+		if (psr_config->psr_version == 0x2) {
+			/* For PSR v2 selective update.
+			 * Indicates whether sink should start capturing
+			 * immediately following active scan line,
+			 * or starting with the 2nd active scan line.
+			 */
+			psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+			/*For PSR v2, determines whether Sink should generate
+			 * IRQ_HPD when CRC mismatch is detected.
+			 */
+			psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR    = 1;
+		}
+
+		dm_helpers_dp_write_dpcd(
+			link->ctx,
+			link,
+			368,
+			&psr_configuration.raw,
+			sizeof(psr_configuration.raw));
+
+		psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+		psr_context->transmitterId = link->link_enc->transmitter;
+		psr_context->engineId = link->link_enc->preferred_engine;
+
+		for (i = 0; i < MAX_PIPES; i++) {
+			if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+					== stream) {
+				/* dmcu -1 for all controller id values,
+				 * therefore +1 here
+				 */
+				psr_context->controllerId =
+					core_dc->current_state->res_ctx.
+					pipe_ctx[i].stream_res.tg->inst + 1;
+				break;
+			}
+		}
+
+		/* Hardcoded for now.  Can be Pcie or Uniphy (or Unknown)*/
+		psr_context->phyType = PHY_TYPE_UNIPHY;
+		/*PhyId is associated with the transmitter id*/
+		psr_context->smuPhyId = link->link_enc->transmitter;
+
+		psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+		psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+						timing.pix_clk_khz * 1000),
+						stream->timing.v_total),
+						stream->timing.h_total);
+
+		psr_context->psrSupportedDisplayConfig = true;
+		psr_context->psrExitLinkTrainingRequired =
+			psr_config->psr_exit_link_training_required;
+		psr_context->sdpTransmitLineNumDeadline =
+			psr_config->psr_sdp_transmit_line_num_deadline;
+		psr_context->psrFrameCaptureIndicationReq =
+			psr_config->psr_frame_capture_indication_req;
+
+		psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+		psr_context->numberOfControllers =
+				link->dc->res_pool->res_cap->num_timing_generator;
+
+		psr_context->rfb_update_auto_en = true;
+
+		/* 2 frames before enter PSR. */
+		psr_context->timehyst_frames = 2;
+		/* half a frame
+		 * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+		 */
+		psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+		psr_context->aux_repeats = 10;
+
+		psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+		/*skip power down the single pipe since it blocks the cstate*/
+		if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+			psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+		/* SMU will perform additional powerdown sequence.
+		 * For unsupported ASICs, set psr_level flag to skip PSR
+		 *  static screen notification to SMU.
+		 *  (Always set for DAL2, did not check ASIC)
+		 */
+		psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
+
+		/* Complete PSR entry before aborting to prevent intermittent
+		 * freezes on certain eDPs
+		 */
+		psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+		/* Controls additional delay after remote frame capture before
+		 * continuing power down, default = 0
+		 */
+		psr_context->frame_delay = 0;
+
+		link->psr_enabled = true;
+		dmcu->funcs->setup_psr(dmcu, link, psr_context);
+		return true;
+	} else
+		return false;
+
+}
+
+/* Return a pointer to the link's status block (owned by the link). */
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
+{
+	return &link->link_status;
+}
+
+/* Resume hook: re-program the HPD filter for physical connectors
+ * (virtual links have no HPD hardware).
+ */
+void core_link_resume(struct dc_link *link)
+{
+	if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+		program_hpd_filter(link);
+}
+
+/* Compute MST payload bandwidth number (PBN) available per time slot from
+ * the link's current rate and lane count. The /54 converts total link Mbps
+ * into the DP MST PBN unit — presumably the spec's 54/64 MBps granularity;
+ * confirm against the DP MST payload bandwidth definition.
+ */
+static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+{
+	struct dc_link_settings *link_settings =
+			&stream->sink->link->cur_link_settings;
+	uint32_t link_rate_in_mbps =
+			link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+	struct fixed31_32 mbps = dal_fixed31_32_from_int(
+			link_rate_in_mbps * link_settings->lane_count);
+
+	return dal_fixed31_32_div_int(mbps, 54);
+}
+
+/* Map a color-depth enum to bits per color component; 0 for unknown values. */
+static int get_color_depth(enum dc_color_depth color_depth)
+{
+	switch (color_depth) {
+	case COLOR_DEPTH_666:
+		return 6;
+	case COLOR_DEPTH_888:
+		return 8;
+	case COLOR_DEPTH_101010:
+		return 10;
+	case COLOR_DEPTH_121212:
+		return 12;
+	case COLOR_DEPTH_141414:
+		return 14;
+	case COLOR_DEPTH_161616:
+		return 16;
+	default:
+		return 0;
+	}
+}
+
+/* Convert the pipe's pixel-clock bandwidth demand into MST PBN units,
+ * including the spec's 0.6% margin factor (see inline derivation below).
+ */
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+	uint32_t bpc;
+	uint64_t kbps;
+	struct fixed31_32 peak_kbps;
+	uint32_t numerator;
+	uint32_t denominator;
+
+	/* 3 color components at bpc bits each, per pixel clock tick. */
+	bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
+	kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk * bpc * 3;
+
+	/*
+	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+	 * common multiplier to render an integer PBN for all link rate/lane
+	 * counts combinations
+	 * calculate
+	 * peak_kbps *= (1006/1000)
+	 * peak_kbps *= (64/54)
+	 * peak_kbps *= 8    convert to bytes
+	 */
+
+	numerator = 64 * PEAK_FACTOR_X1000;
+	denominator = 54 * 8 * 1000 * 1000;
+	kbps *= numerator;
+	peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+
+	return peak_kbps;
+}
+
+/* Merge the DRM-proposed MST stream allocation table into the link's copy.
+ * Existing VCP ids keep their recorded stream encoder; new VCP ids are added
+ * with the given encoder. The link table is then replaced wholesale.
+ */
+static void update_mst_stream_alloc_table(
+	struct dc_link *link,
+	struct stream_encoder *stream_enc,
+	const struct dp_mst_stream_allocation_table *proposed_table)
+{
+	struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
+			{ 0 } };
+	struct link_mst_stream_allocation *dc_alloc;
+
+	int i;
+	int j;
+
+	/* if DRM proposed_table has more than one new payload */
+	ASSERT(proposed_table->stream_count -
+			link->mst_stream_alloc_table.stream_count < 2);
+
+	/* copy proposed_table to link, add stream encoder */
+	for (i = 0; i < proposed_table->stream_count; i++) {
+
+		for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+			dc_alloc =
+			&link->mst_stream_alloc_table.stream_allocations[j];
+
+			if (dc_alloc->vcp_id ==
+				proposed_table->stream_allocations[i].vcp_id) {
+
+				/* Known VCP id: carry over existing entry. */
+				work_table[i] = *dc_alloc;
+				break; /* exit j loop */
+			}
+		}
+
+		/* new vcp_id */
+		if (j == link->mst_stream_alloc_table.stream_count) {
+			work_table[i].vcp_id =
+				proposed_table->stream_allocations[i].vcp_id;
+			work_table[i].slot_count =
+				proposed_table->stream_allocations[i].slot_count;
+			work_table[i].stream_enc = stream_enc;
+		}
+	}
+
+	/* update link->mst_stream_alloc_table with work_table */
+	link->mst_stream_alloc_table.stream_count =
+			proposed_table->stream_count;
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+		link->mst_stream_alloc_table.stream_allocations[i] =
+				work_table[i];
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+/* Allocate an MST payload for this pipe's stream: update the link's stream
+ * allocation table via the DM helpers, program the DP source TX, poll for the
+ * allocation-change trigger, send the payload allocation downstream, and set
+ * the stream encoder's time-slot bandwidth (pbn / pbn-per-slot).
+ *
+ * Fix: the warning log's adjacent string literals lacked separating spaces
+ * and rendered as "Failed to updateMST allocation tableforpipe idx:%d".
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->sink->link;
+	struct link_encoder *link_encoder = link->link_enc;
+	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+	struct dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	struct fixed31_32 pbn;
+	struct fixed31_32 pbn_per_slot;
+	uint8_t i;
+
+	/* enable_link_dp_mst already check link->enabled_stream_count
+	 * and stream is in link->stream[]. This is called during set mode,
+	 * stream_enc is available.
+	 */
+
+	/* get calculate VC payload for stream: stream_alloc */
+	if (dm_helpers_dp_mst_write_payload_allocation_table(
+		stream->ctx,
+		stream,
+		&proposed_table,
+		true)) {
+		update_mst_stream_alloc_table(
+					link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+	}
+	else
+		dm_logger_write(link->ctx->logger, LOG_WARNING,
+				"Failed to update"
+				" MST allocation table for"
+				" pipe idx:%d\n",
+				pipe_ctx->pipe_idx);
+
+	dm_logger_write(link->ctx->logger, LOG_MST,
+			"%s  "
+			"stream_count: %d: \n ",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		dm_logger_write(link->ctx->logger, LOG_MST,
+		"stream_enc[%d]: 0x%x      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	ASSERT(proposed_table.stream_count > 0);
+
+	/* program DP source TX for payload */
+	link_encoder->funcs->update_mst_stream_allocation_table(
+		link_encoder,
+		&link->mst_stream_alloc_table);
+
+	/* send down message */
+	dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			stream);
+
+	dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			stream,
+			true);
+
+	/* slot X.Y for only current stream */
+	pbn_per_slot = get_pbn_per_slot(stream);
+	pbn = get_pbn_from_timing(pipe_ctx);
+	avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
+
+	stream_encoder->funcs->set_mst_bandwidth(
+		stream_encoder,
+		avg_time_slots_per_mtp);
+
+	return DC_OK;
+
+}
+
+/* Release this pipe's MST payload: zero the stream encoder's time-slot
+ * bandwidth, then (when still connected to an MST branch) remove the stream
+ * from the allocation table, reprogram the DP source TX, and notify the
+ * downstream branch.
+ *
+ * Fix: the warning log's adjacent string literals lacked separating spaces
+ * and rendered as "Failed to updateMST allocation tableforpipe idx:%d".
+ */
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->sink->link;
+	struct link_encoder *link_encoder = link->link_enc;
+	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+	struct dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+	uint8_t i;
+	bool mst_mode = (link->type == dc_connection_mst_branch);
+
+	/* deallocate_mst_payload is called before disable link. When mode or
+	 * disable/enable monitor, new stream is created which is not in link
+	 * stream[] yet. For this, payload is not allocated yet, so de-alloc
+	 * should not done. For new mode set, map_resources will get engine
+	 * for new stream, so stream_enc->id should be validated until here.
+	 */
+
+	/* slot X.Y */
+	stream_encoder->funcs->set_mst_bandwidth(
+		stream_encoder,
+		avg_time_slots_per_mtp);
+
+	/* TODO: which component is responsible for remove payload table? */
+	if (mst_mode) {
+		if (dm_helpers_dp_mst_write_payload_allocation_table(
+				stream->ctx,
+				stream,
+				&proposed_table,
+				false)) {
+
+			update_mst_stream_alloc_table(
+				link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+		}
+		else {
+				dm_logger_write(link->ctx->logger, LOG_WARNING,
+						"Failed to update"
+						" MST allocation table for"
+						" pipe idx:%d\n",
+						pipe_ctx->pipe_idx);
+		}
+	}
+
+	dm_logger_write(link->ctx->logger, LOG_MST,
+			"%s"
+			"stream_count: %d: ",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		dm_logger_write(link->ctx->logger, LOG_MST,
+		"stream_enc[%d]: 0x%x      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	link_encoder->funcs->update_mst_stream_allocation_table(
+		link_encoder,
+		&link->mst_stream_alloc_table);
+
+	if (mst_mode) {
+		dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			stream);
+
+		dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			stream,
+			false);
+	}
+
+	return DC_OK;
+}
+
+/* Enable the link for this pipe, then the stream itself: clear any OTG test
+ * pattern, enable the stream in HW, allocate the MST payload when applicable,
+ * and unblank DP streams. Aborts on link failure except for SST DP training
+ * failures, where some monitors recover and display anyway.
+ */
+void core_link_enable_stream(
+		struct dc_state *state,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+
+	enum dc_status status = enable_link(state, pipe_ctx);
+
+	if (status != DC_OK) {
+			dm_logger_write(pipe_ctx->stream->ctx->logger,
+			LOG_WARNING, "enabling link %u failed: %d\n",
+			pipe_ctx->stream->sink->link->link_index,
+			status);
+
+			/* Abort stream enable *unless* the failure was due to
+			 * DP link training - some DP monitors will recover and
+			 * show the stream anyway. But MST displays can't proceed
+			 * without link training.
+			 */
+			if (status != DC_FAIL_DP_LINK_TRAINING ||
+					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+				BREAK_TO_DEBUGGER();
+				return;
+			}
+	}
+
+	/* turn off otg test pattern if enable */
+	pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+			CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+			COLOR_DEPTH_UNDEFINED);
+
+	core_dc->hwss.enable_stream(pipe_ctx);
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		allocate_mst_payload(pipe_ctx);
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal))
+		core_dc->hwss.unblank_stream(pipe_ctx,
+			&pipe_ctx->stream->sink->link->cur_link_settings);
+}
+
+/* Disable the stream and its link: release the MST payload first (must
+ * happen before teardown), then disable the stream in HW and the link PHY.
+ */
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc  *core_dc = stream->ctx->dc;
+
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		deallocate_mst_payload(pipe_ctx);
+
+	core_dc->hwss.disable_stream(pipe_ctx, option);
+
+	disable_link(stream->sink->link, stream->signal);
+}
+
+/* Set or clear AV mute on the pipe. AV mute is an HDMI-only feature; other
+ * signal types are silently ignored.
+ */
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		core_dc->hwss.set_avmute(pipe_ctx, enable);
+}
+

+ 775 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c

@@ -0,0 +1,775 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "gpio_service_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/grph_object_id.h"
+#include "include/dpcd_defs.h"
+#include "include/logger_interface.h"
+#include "include/vector.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+
/* Workaround delays (presumably ms - TODO confirm units against callers):
 * extra wait after AUX power-up, and a minimum I2C-over-AUX defer interval
 * required by some converter dongles. */
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70

/* CV smart dongle slave address for retrieving supported HDTV modes*/
#define CV_SMART_DONGLE_ADDRESS 0x20
/* DVI-HDMI dongle slave address for retrieving dongle signature*/
#define DVI_HDMI_DONGLE_ADDRESS 0x68
static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
/* Layout of the signature block read from a DVI-HDMI dongle. */
struct dvi_hdmi_dongle_signature_data {
	int8_t vendor[3];/* "AMD" */
	uint8_t version[2];
	uint8_t size;
	int8_t id[11];/* "6140063500G"*/
};
/* DP-HDMI dongle slave address for retrieving dongle signature*/
#define DP_HDMI_DONGLE_ADDRESS 0x40
static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04

/* Signature block at the start of a DP-HDMI (DP++) dongle's I2C map. */
struct dp_hdmi_dongle_signature_data {
	int8_t id[15];/* "DP-HDMI ADAPTOR"*/
	uint8_t eot;/* end of transmition '\x4' */
};

/* SCDC Address defines (HDMI 2.0)*/
#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
#define HDMI_SCDC_ADDRESS  0x54
#define HDMI_SCDC_SINK_VERSION 0x01
#define HDMI_SCDC_SOURCE_VERSION 0x02
#define HDMI_SCDC_UPDATE_0 0x10
#define HDMI_SCDC_TMDS_CONFIG 0x20
#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
#define HDMI_SCDC_CONFIG_0 0x30
#define HDMI_SCDC_STATUS_FLAGS 0x40
#define HDMI_SCDC_ERR_DETECT 0x50
#define HDMI_SCDC_TEST_CONFIG 0xC0

/* SCDC Update flags register pair, byte view plus bitfield view. */
union hdmi_scdc_update_read_data {
	uint8_t byte[2];
	struct {
		uint8_t STATUS_UPDATE:1;
		uint8_t CED_UPDATE:1;
		uint8_t RR_TEST:1;
		uint8_t RESERVED:5;
		uint8_t RESERVED2:8;
	} fields;
};

/* SCDC Status_Flags: TMDS clock detection and per-channel lock bits. */
union hdmi_scdc_status_flags_data {
	uint8_t byte[2];
	struct {
		uint8_t CLOCK_DETECTED:1;
		uint8_t CH0_LOCKED:1;
		uint8_t CH1_LOCKED:1;
		uint8_t CH2_LOCKED:1;
		uint8_t RESERVED:4;
		uint8_t RESERVED2:8;
	} fields;
};

/* SCDC character error detection counters: a 15-bit count (8 low + 7 high
 * bits) plus a valid bit per channel, followed by a checksum byte. */
union hdmi_scdc_ced_data {
	uint8_t byte[7];
	struct {
		uint8_t CH0_8LOW:8;
		uint8_t CH0_7HIGH:7;
		uint8_t CH0_VALID:1;
		uint8_t CH1_8LOW:8;
		uint8_t CH1_7HIGH:7;
		uint8_t CH1_VALID:1;
		uint8_t CH2_8LOW:8;
		uint8_t CH2_7HIGH:7;
		uint8_t CH2_VALID:1;
		uint8_t CHECKSUM:8;
	} fields;
};

/* SCDC Test_Config register (read-request test support). */
union hdmi_scdc_test_config_Data {
	uint8_t byte;
	struct {
		uint8_t TEST_READ_REQUEST_DELAY:7;
		uint8_t TEST_READ_REQUEST: 1;
	} fields;
};

/* Thin wrappers giving the i2c and aux payload vectors distinct types. */
struct i2c_payloads {
	struct vector payloads;
};

struct aux_payloads {
	struct vector payloads;
};
+
+static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+	struct i2c_payloads *payloads;
+
+	payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+
+	if (!payloads)
+		return NULL;
+
+	if (dal_vector_construct(
+		&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+		return payloads;
+
+	kfree(payloads);
+	return NULL;
+
+}
+
+static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+{
+	return (struct i2c_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+{
+	return p->payloads.count;
+}
+
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+{
+	if (!p || !*p)
+		return;
+	dal_vector_destruct(&(*p)->payloads);
+	kfree(*p);
+	*p = NULL;
+
+}
+
+static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+	struct aux_payloads *payloads;
+
+	payloads = kzalloc(sizeof(struct aux_payloads), GFP_KERNEL);
+
+	if (!payloads)
+		return NULL;
+
+	if (dal_vector_construct(
+		&payloads->payloads, ctx, count, sizeof(struct aux_payload)))
+		return payloads;
+
+	kfree(payloads);
+	return NULL;
+}
+
+static struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
+{
+	return (struct aux_payload *)p->payloads.container;
+}
+
+static uint32_t  dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
+{
+	return p->payloads.count;
+}
+
+static void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
+{
+	if (!p || !*p)
+		return;
+
+	dal_vector_destruct(&(*p)->payloads);
+	kfree(*p);
+	*p = NULL;
+}
+
+#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+void dal_ddc_i2c_payloads_add(
+	struct i2c_payloads *payloads,
+	uint32_t address,
+	uint32_t len,
+	uint8_t *data,
+	bool write)
+{
+	uint32_t payload_size = EDID_SEGMENT_SIZE;
+	uint32_t pos;
+
+	for (pos = 0; pos < len; pos += payload_size) {
+		struct i2c_payload payload = {
+			.write = write,
+			.address = address,
+			.length = DDC_MIN(payload_size, len - pos),
+			.data = data + pos };
+		dal_vector_append(&payloads->payloads, &payload);
+	}
+
+}
+
+void dal_ddc_aux_payloads_add(
+	struct aux_payloads *payloads,
+	uint32_t address,
+	uint32_t len,
+	uint8_t *data,
+	bool write)
+{
+	uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
+	uint32_t pos;
+
+	for (pos = 0; pos < len; pos += payload_size) {
+		struct aux_payload payload = {
+			.i2c_over_aux = true,
+			.write = write,
+			.address = address,
+			.length = DDC_MIN(payload_size, len - pos),
+			.data = data + pos };
+		dal_vector_append(&payloads->payloads, &payload);
+	}
+}
+
/* Initialize a ddc_service from VBIOS data: look up the connector's I2C
 * line and, if found, create the GPIO-backed DDC pin for it.  Also derives
 * the "internal display" flag from the connector type. */
static void construct(
	struct ddc_service *ddc_service,
	struct ddc_service_init_data *init_data)
{
	enum connector_id connector_id =
		dal_graphics_object_id_get_connector_id(init_data->id);

	struct gpio_service *gpio_service = init_data->ctx->gpio_service;
	struct graphics_object_i2c_info i2c_info;
	struct gpio_ddc_hw_info hw_info;
	struct dc_bios *dcb = init_data->ctx->dc_bios;

	ddc_service->link = init_data->link;
	ddc_service->ctx = init_data->ctx;

	/* No I2C info in VBIOS for this object -> no physical DDC pin. */
	if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
		ddc_service->ddc_pin = NULL;
	} else {
		hw_info.ddc_channel = i2c_info.i2c_line;
		hw_info.hw_supported = i2c_info.i2c_hw_assist;

		/* NOTE(review): the pin is identified by the clock line's
		 * register index and bit mask - confirm this matches
		 * dal_gpio_create_ddc()'s expectations. */
		ddc_service->ddc_pin = dal_gpio_create_ddc(
			gpio_service,
			i2c_info.gpio_info.clk_a_register_index,
			1 << i2c_info.gpio_info.clk_a_shift,
			&hw_info);
	}

	ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
	ddc_service->flags.FORCE_READ_REPEATED_START = false;
	ddc_service->flags.EDID_STRESS_READ = false;

	/* eDP and LVDS panels are built into the machine. */
	ddc_service->flags.IS_INTERNAL_DISPLAY =
		connector_id == CONNECTOR_ID_EDP ||
		connector_id == CONNECTOR_ID_LVDS;

	ddc_service->wa.raw = 0;
}
+
+struct ddc_service *dal_ddc_service_create(
+	struct ddc_service_init_data *init_data)
+{
+	struct ddc_service *ddc_service;
+
+	ddc_service = kzalloc(sizeof(struct ddc_service), GFP_KERNEL);
+
+	if (!ddc_service)
+		return NULL;
+
+	construct(ddc_service, init_data);
+	return ddc_service;
+}
+
+static void destruct(struct ddc_service *ddc)
+{
+	if (ddc->ddc_pin)
+		dal_gpio_destroy_ddc(&ddc->ddc_pin);
+}
+
+void dal_ddc_service_destroy(struct ddc_service **ddc)
+{
+	if (!ddc || !*ddc) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+	destruct(*ddc);
+	kfree(*ddc);
+	*ddc = NULL;
+}
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
+{
+	return DDC_SERVICE_TYPE_CONNECTOR;
+}
+
+void dal_ddc_service_set_transaction_type(
+	struct ddc_service *ddc,
+	enum ddc_transaction_type type)
+{
+	ddc->transaction_type = type;
+}
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+{
+	switch (ddc->transaction_type) {
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+		enum display_dongle_type dongle_type)
+{
+	ddc->dongle_type = dongle_type;
+}
+
+static uint32_t defer_delay_converter_wa(
+	struct ddc_service *ddc,
+	uint32_t defer_delay)
+{
+	struct dc_link *link = ddc->link;
+
+	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
+		!memcmp(link->dpcd_caps.branch_dev_name,
+			DP_DVI_CONVERTER_ID_4,
+			sizeof(link->dpcd_caps.branch_dev_name)))
+		return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
+			defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
+
+	return defer_delay;
+}
+
+#define DP_TRANSLATOR_DELAY 5
+
+uint32_t get_defer_delay(struct ddc_service *ddc)
+{
+	uint32_t defer_delay = 0;
+
+	switch (ddc->transaction_type) {
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+		if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
+			(DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
+			(DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
+				ddc->dongle_type)) {
+
+			defer_delay = DP_TRANSLATOR_DELAY;
+
+			defer_delay =
+				defer_delay_converter_wa(ddc, defer_delay);
+
+		} else /*sink has a delay different from an Active Converter*/
+			defer_delay = 0;
+		break;
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+		defer_delay = DP_TRANSLATOR_DELAY;
+		break;
+	default:
+		break;
+	}
+	return defer_delay;
+}
+
+static bool i2c_read(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *buffer,
+	uint32_t len)
+{
+	uint8_t offs_data = 0;
+	struct i2c_payload payloads[2] = {
+		{
+		.write = true,
+		.address = address,
+		.length = 1,
+		.data = &offs_data },
+		{
+		.write = false,
+		.address = address,
+		.length = len,
+		.data = buffer } };
+
+	struct i2c_command command = {
+		.payloads = payloads,
+		.number_of_payloads = 2,
+		.engine = DDC_I2C_COMMAND_ENGINE,
+		.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+	return dm_helpers_submit_i2c(
+			ddc->ctx,
+			ddc->link,
+			&command);
+}
+
/* Probe the I2C side of a DP dual-mode (DP++) connector to classify any
 * passive dongle, reporting the result through sink_cap->dongle_type and
 * sink_cap->max_hdmi_pixel_clock (kHz).  No I2C response at the DP-HDMI
 * address is interpreted as a DP-DVI dongle. */
void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
	struct ddc_service *ddc,
	struct display_sink_capability *sink_cap)
{
	uint8_t i;
	bool is_valid_hdmi_signature;
	enum display_dongle_type *dongle = &sink_cap->dongle_type;
	uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
	bool is_type2_dongle = false;
	struct dp_hdmi_dongle_signature_data *dongle_signature;

	/* Assume we have no valid DP passive dongle connected */
	*dongle = DISPLAY_DONGLE_NONE;
	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;

	/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
	if (!i2c_read(
		ddc,
		DP_HDMI_DONGLE_ADDRESS,
		type2_dongle_buf,
		sizeof(type2_dongle_buf))) {
		*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
		sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;

		CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
				"DP-DVI passive dongle %dMhz: ",
				DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
		return;
	}

	/* Check if Type 2 dongle.*/
	if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
		is_type2_dongle = true;

	dongle_signature =
		(struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;

	is_valid_hdmi_signature = true;

	/* Check EOT */
	if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
		is_valid_hdmi_signature = false;
	}

	/* Check signature */
	for (i = 0; i < sizeof(dongle_signature->id); ++i) {
		/* If its not the right signature,
		 * skip mismatch in subversion byte.*/
		if (dongle_signature->id[i] !=
			dp_hdmi_dongle_signature_str[i] && i != 3) {

			/* NOTE(review): for type-1 dongles an ID mismatch is
			 * ignored here - only a bad EOT byte invalidates the
			 * signature.  Confirm this asymmetry is intended. */
			if (is_type2_dongle) {
				is_valid_hdmi_signature = false;
				break;
			}

		}
	}

	if (is_type2_dongle) {
		/* Scale the raw register by 2.5 (x*2 + x/2); the register is
		 * presumably in 2.5 MHz units - TODO confirm. */
		uint32_t max_tmds_clk =
			type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];

		max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;

		/* An out-of-range TMDS clock means the cap is unreliable;
		 * fall back to treating it as a DVI dongle. */
		if (0 == max_tmds_clk ||
				max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
				max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;

			CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
					sizeof(type2_dongle_buf),
					"DP-DVI passive dongle %dMhz: ",
					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
		} else {
			if (is_valid_hdmi_signature == true) {
				*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;

				CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
						sizeof(type2_dongle_buf),
						"Type 2 DP-HDMI passive dongle %dMhz: ",
						max_tmds_clk);
			} else {
				*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;

				CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
						sizeof(type2_dongle_buf),
						"Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
						max_tmds_clk);

			}

			/* Multiply by 1000 to convert to kHz. */
			sink_cap->max_hdmi_pixel_clock =
				max_tmds_clk * 1000;
		}

	} else {
		if (is_valid_hdmi_signature == true) {
			*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;

			CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
					sizeof(type2_dongle_buf),
					"Type 1 DP-HDMI passive dongle %dMhz: ",
					sink_cap->max_hdmi_pixel_clock / 1000);
		} else {
			*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;

			CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
					sizeof(type2_dongle_buf),
					"Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
					sink_cap->max_hdmi_pixel_clock / 1000);
		}
	}

	return;
}
+
/* Number of DPCD bytes from DP_DPCD_REV through DP_EDP_CONFIGURATION_CAP,
 * i.e. the receiver-capability block read in one burst. */
enum {
	DP_SINK_CAP_SIZE =
		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
};
+
+bool dal_ddc_service_query_ddc_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *write_buf,
+	uint32_t write_size,
+	uint8_t *read_buf,
+	uint32_t read_size)
+{
+	bool ret;
+	uint32_t payload_size =
+		dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+			DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
+
+	uint32_t write_payloads =
+		(write_size + payload_size - 1) / payload_size;
+
+	uint32_t read_payloads =
+		(read_size + payload_size - 1) / payload_size;
+
+	uint32_t payloads_num = write_payloads + read_payloads;
+
+	if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+		return false;
+
+	/*TODO: len of payload data for i2c and aux is uint8!!!!,
+	 *  but we want to read 256 over i2c!!!!*/
+	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+		struct aux_payloads *payloads =
+			dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
+
+		struct aux_command command = {
+			.payloads = dal_ddc_aux_payloads_get(payloads),
+			.number_of_payloads = 0,
+			.defer_delay = get_defer_delay(ddc),
+			.max_defer_write_retry = 0 };
+
+		dal_ddc_aux_payloads_add(
+			payloads, address, write_size, write_buf, true);
+
+		dal_ddc_aux_payloads_add(
+			payloads, address, read_size, read_buf, false);
+
+		command.number_of_payloads =
+			dal_ddc_aux_payloads_get_count(payloads);
+
+		ret = dal_i2caux_submit_aux_command(
+				ddc->ctx->i2caux,
+				ddc->ddc_pin,
+				&command);
+
+		dal_ddc_aux_payloads_destroy(&payloads);
+
+	} else {
+		struct i2c_payloads *payloads =
+			dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+
+		struct i2c_command command = {
+			.payloads = dal_ddc_i2c_payloads_get(payloads),
+			.number_of_payloads = 0,
+			.engine = DDC_I2C_COMMAND_ENGINE,
+			.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+		dal_ddc_i2c_payloads_add(
+			payloads, address, write_size, write_buf, true);
+
+		dal_ddc_i2c_payloads_add(
+			payloads, address, read_size, read_buf, false);
+
+		command.number_of_payloads =
+			dal_ddc_i2c_payloads_get_count(payloads);
+
+		ret = dm_helpers_submit_i2c(
+				ddc->ctx,
+				ddc->link,
+				&command);
+
+		dal_ddc_i2c_payloads_destroy(&payloads);
+	}
+
+	return ret;
+}
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+	struct ddc_service *ddc,
+	bool i2c,
+	enum i2c_mot_mode mot,
+	uint32_t address,
+	uint8_t *data,
+	uint32_t len)
+{
+	struct aux_payload read_payload = {
+		.i2c_over_aux = i2c,
+		.write = false,
+		.address = address,
+		.length = len,
+		.data = data,
+	};
+	struct aux_command command = {
+		.payloads = &read_payload,
+		.number_of_payloads = 1,
+		.defer_delay = 0,
+		.max_defer_write_retry = 0,
+		.mot = mot
+	};
+
+	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+		BREAK_TO_DEBUGGER();
+		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	}
+
+	if (dal_i2caux_submit_aux_command(
+		ddc->ctx->i2caux,
+		ddc->ddc_pin,
+		&command))
+		return DDC_RESULT_SUCESSFULL;
+
+	return DDC_RESULT_FAILED_OPERATION;
+}
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+	struct ddc_service *ddc,
+	bool i2c,
+	enum i2c_mot_mode mot,
+	uint32_t address,
+	const uint8_t *data,
+	uint32_t len)
+{
+	struct aux_payload write_payload = {
+		.i2c_over_aux = i2c,
+		.write = true,
+		.address = address,
+		.length = len,
+		.data = (uint8_t *)data,
+	};
+	struct aux_command command = {
+		.payloads = &write_payload,
+		.number_of_payloads = 1,
+		.defer_delay = 0,
+		.max_defer_write_retry = 0,
+		.mot = mot
+	};
+
+	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+		BREAK_TO_DEBUGGER();
+		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	}
+
+	if (dal_i2caux_submit_aux_command(
+		ddc->ctx->i2caux,
+		ddc->ddc_pin,
+		&command))
+		return DDC_RESULT_SUCESSFULL;
+
+	return DDC_RESULT_FAILED_OPERATION;
+}
+
+/*test only function*/
+void dal_ddc_service_set_ddc_pin(
+	struct ddc_service *ddc_service,
+	struct ddc *ddc)
+{
+	ddc_service->ddc_pin = ddc;
+}
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+{
+	return ddc_service->ddc_pin;
+}
+
+void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+		uint32_t pix_clk,
+		bool lte_340_scramble)
+{
+	bool over_340_mhz = pix_clk > 340000 ? 1 : 0;
+	uint8_t slave_address = HDMI_SCDC_ADDRESS;
+	uint8_t offset = HDMI_SCDC_SINK_VERSION;
+	uint8_t sink_version = 0;
+	uint8_t write_buffer[2] = {0};
+	/*Lower than 340 Scramble bit from SCDC caps*/
+
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+			sizeof(offset), &sink_version, sizeof(sink_version));
+	if (sink_version == 1) {
+		/*Source Version = 1*/
+		write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
+		write_buffer[1] = 1;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				write_buffer, sizeof(write_buffer), NULL, 0);
+		/*Read Request from SCDC caps*/
+	}
+	write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
+
+	if (over_340_mhz) {
+		write_buffer[1] = 3;
+	} else if (lte_340_scramble) {
+		write_buffer[1] = 1;
+	} else {
+		write_buffer[1] = 0;
+	}
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+			sizeof(write_buffer), NULL, 0);
+}
+
+void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+{
+	uint8_t slave_address = HDMI_SCDC_ADDRESS;
+	uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
+	uint8_t tmds_config = 0;
+
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+			sizeof(offset), &tmds_config, sizeof(tmds_config));
+	if (tmds_config & 0x1) {
+		union hdmi_scdc_status_flags_data status_data = { {0} };
+		uint8_t scramble_status = 0;
+
+		offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				&offset, sizeof(offset), &scramble_status,
+				sizeof(scramble_status));
+		offset = HDMI_SCDC_STATUS_FLAGS;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				&offset, sizeof(offset), status_data.byte,
+				sizeof(status_data.byte));
+	}
+}
+

+ 2587 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c

@@ -0,0 +1,2587 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_link_dp.h"
+#include "dm_helpers.h"
+
+#include "inc/core_types.h"
+#include "link_hwss.h"
+#include "dc_link_ddc.h"
+#include "core_status.h"
+#include "dpcd_defs.h"
+
+#include "resource.h"
+
/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
		PRE_EMPHASIS_LEVEL3,
		PRE_EMPHASIS_LEVEL2,
		PRE_EMPHASIS_LEVEL1,
		PRE_EMPHASIS_DISABLED };

/* Bounds on the optional post-link-training adjustment phase: max number
 * of adjust requests honored, and the overall timeout (presumably ms —
 * TODO confirm units against the caller). */
enum {
	POST_LT_ADJ_REQ_LIMIT = 6,
	POST_LT_ADJ_REQ_TIMEOUT = 200
};

enum {
	/* Retries of the complete link-training sequence. */
	LINK_TRAINING_MAX_RETRY_COUNT = 5,
	/* to avoid infinite loop where-in the receiver
	 * switches between different VS
	 */
	LINK_TRAINING_MAX_CR_RETRY = 100
};

/* Forward declarations for the fallback logic used when training fails. */
static bool decide_fallback_link_setting(
		struct dc_link_settings initial_link_settings,
		struct dc_link_settings *current_link_setting,
		enum link_training_result training_result);
static struct dc_link_settings get_common_supported_link_settings (
		struct dc_link_settings link_setting_a,
		struct dc_link_settings link_setting_b);
+
+static void wait_for_training_aux_rd_interval(
+	struct dc_link *link,
+	uint32_t default_wait_in_micro_secs)
+{
+	union training_aux_rd_interval training_rd_interval;
+
+	/* overwrite the delay if rev > 1.1*/
+	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+		/* DP 1.2 or later - retrieve delay through
+		 * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
+		core_link_read_dpcd(
+			link,
+			DP_TRAINING_AUX_RD_INTERVAL,
+			(uint8_t *)&training_rd_interval,
+			sizeof(training_rd_interval));
+
+		if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+			default_wait_in_micro_secs =
+				training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+	}
+
+	udelay(default_wait_in_micro_secs);
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n wait = %d\n",
+		__func__,
+		default_wait_in_micro_secs);
+}
+
/* Write TRAINING_PATTERN_SET to the sink's DPCD, then log the value. */
static void dpcd_set_training_pattern(
	struct dc_link *link,
	union dpcd_training_pattern dpcd_pattern)
{
	core_link_write_dpcd(
		link,
		DP_TRAINING_PATTERN_SET,
		&dpcd_pattern.raw,
		1);

	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
		"%s\n %x pattern = %x\n",
		__func__,
		DP_TRAINING_PATTERN_SET,
		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
+
/* Program the sink's link configuration: LINK_BW_SET and LANE_COUNT_SET are
 * written as one two-byte burst starting at DP_LINK_BW_SET, then
 * DOWNSPREAD_CTRL separately.  Enhanced framing is always requested;
 * POST_LT_ADJ_REQ_GRANTED mirrors the sink's advertised support. */
static void dpcd_set_link_settings(
	struct dc_link *link,
	const struct link_training_settings *lt_settings)
{
	uint8_t rate = (uint8_t)
	(lt_settings->link_settings.link_rate);

	union down_spread_ctrl downspread = {{0}};
	union lane_count_set lane_count_set = {{0}};
	uint8_t link_set_buffer[2];

	downspread.raw = (uint8_t)
	(lt_settings->link_settings.link_spread);

	lane_count_set.bits.LANE_COUNT_SET =
	lt_settings->link_settings.lane_count;

	lane_count_set.bits.ENHANCED_FRAMING = 1;

	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
		link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;

	/* Two-byte burst: byte 0 = link rate, byte 1 = lane count register. */
	link_set_buffer[0] = rate;
	link_set_buffer[1] = lane_count_set.raw;

	core_link_write_dpcd(link, DP_LINK_BW_SET,
	link_set_buffer, 2);
	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
	&downspread.raw, sizeof(downspread));

	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
		"%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
		__func__,
		DP_LINK_BW_SET,
		lt_settings->link_settings.link_rate,
		DP_LANE_COUNT_SET,
		lt_settings->link_settings.lane_count,
		DP_DOWNSPREAD_CTRL,
		lt_settings->link_settings.link_spread);

}
+
+static enum dpcd_training_patterns
+	hw_training_pattern_to_dpcd_training_pattern(
+	struct dc_link *link,
+	enum hw_dp_training_pattern pattern)
+{
+	enum dpcd_training_patterns dpcd_tr_pattern =
+	DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+	switch (pattern) {
+	case HW_DP_TRAINING_PATTERN_1:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+		break;
+	case HW_DP_TRAINING_PATTERN_2:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+		break;
+	case HW_DP_TRAINING_PATTERN_3:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+		break;
+	case HW_DP_TRAINING_PATTERN_4:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+		break;
+	default:
+		ASSERT(0);
+		dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+			"%s: Invalid HW Training pattern: %d\n",
+			__func__, pattern);
+		break;
+	}
+
+	return dpcd_tr_pattern;
+
+}
+
/* Write the training pattern plus per-lane drive settings to the sink,
 * normally as one (1 + lane_count)-byte burst starting at
 * DP_TRAINING_PATTERN_SET.  eDP panels hit by EPR#366724 get two separate
 * writes instead (edp_workaround, currently hardwired off).  Lane 0's
 * settings are cached on the link afterwards. */
static void dpcd_set_lt_pattern_and_lane_settings(
	struct dc_link *link,
	const struct link_training_settings *lt_settings,
	enum hw_dp_training_pattern pattern)
{
	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
	const uint32_t dpcd_base_lt_offset =
	DP_TRAINING_PATTERN_SET;
	uint8_t dpcd_lt_buffer[5] = {0};
	union dpcd_training_pattern dpcd_pattern = {{0}};
	uint32_t lane;
	uint32_t size_in_bytes;
	bool edp_workaround = false; /* TODO link_prop.INTERNAL */

	/*****************************************************************
	* DpcdAddress_TrainingPatternSet
	*****************************************************************/
	dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
		hw_training_pattern_to_dpcd_training_pattern(link, pattern);

	dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
		= dpcd_pattern.raw;

	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
		"%s\n %x pattern = %x\n",
		__func__,
		DP_TRAINING_PATTERN_SET,
		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);

	/*****************************************************************
	* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
	*****************************************************************/
	for (lane = 0; lane <
		(uint32_t)(lt_settings->link_settings.lane_count); lane++) {

		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
		(uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
		(uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);

		/* MAX_*_REACHED tells the sink no higher level is available. */
		dpcd_lane[lane].bits.MAX_SWING_REACHED =
		(lt_settings->lane_settings[lane].VOLTAGE_SWING ==
		VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
		(lt_settings->lane_settings[lane].PRE_EMPHASIS ==
		PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
	}

	/* concatinate everything into one buffer*/

	size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);

	 // 0x00103 - 0x00102
	memmove(
		&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
		dpcd_lane,
		size_in_bytes);

	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
		"%s:\n %x VS set = %x  PE set = %x \
		max VS Reached = %x  max PE Reached = %x\n",
		__func__,
		DP_TRAINING_LANE0_SET,
		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
		dpcd_lane[0].bits.MAX_SWING_REACHED,
		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);

	if (edp_workaround) {
		/* for eDP write in 2 parts because the 5-byte burst is
		* causing issues on some eDP panels (EPR#366724)
		*/
		core_link_write_dpcd(
			link,
			DP_TRAINING_PATTERN_SET,
			&dpcd_pattern.raw,
			sizeof(dpcd_pattern.raw) );

		core_link_write_dpcd(
			link,
			DP_TRAINING_LANE0_SET,
			(uint8_t *)(dpcd_lane),
			size_in_bytes);

		} else
		/* write it all in (1 + number-of-lanes)-byte burst*/
			core_link_write_dpcd(
				link,
				dpcd_base_lt_offset,
				dpcd_lt_buffer,
				size_in_bytes + sizeof(dpcd_pattern.raw) );

	link->cur_lane_setting = lt_settings->lane_settings[0];
}
+
+static bool is_cr_done(enum dc_lane_count ln_count,
+	union lane_status *dpcd_lane_status)
+{
+	bool done = true;
+	uint32_t lane;
+	/*LANEx_CR_DONE bits All 1's?*/
+	for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+		if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+			done = false;
+	}
+	return done;
+
+}
+
+static bool is_ch_eq_done(enum dc_lane_count ln_count,
+	union lane_status *dpcd_lane_status,
+	union lane_align_status_updated *lane_status_updated)
+{
+	bool done = true;
+	uint32_t lane;
+	if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
+		done = false;
+	else {
+		for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+			if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
+				!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+				done = false;
+		}
+	}
+	return done;
+
+}
+
+static void update_drive_settings(
+		struct link_training_settings *dest,
+		struct link_training_settings src)
+{
+	uint32_t lane;
+	for (lane = 0; lane < src.link_settings.lane_count; lane++) {
+		dest->lane_settings[lane].VOLTAGE_SWING =
+			src.lane_settings[lane].VOLTAGE_SWING;
+		dest->lane_settings[lane].PRE_EMPHASIS =
+			src.lane_settings[lane].PRE_EMPHASIS;
+		dest->lane_settings[lane].POST_CURSOR2 =
+			src.lane_settings[lane].POST_CURSOR2;
+	}
+}
+
/* Return the 'index'-th nibble of a packed buffer: even indices select the
 * low nibble of buf[index/2], odd indices the high nibble. */
static uint8_t get_nibble_at_index(const uint8_t *buf,
	uint32_t index)
{
	uint8_t byte = buf[index / 2];

	return (index % 2) ? (byte >> 4) : (byte & 0x0F);
}
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+	enum dc_voltage_swing voltage)
+{
+	enum dc_pre_emphasis pre_emphasis;
+	pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+	if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+		pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+	return pre_emphasis;
+
+}
+
/* Derive a uniform drive-setting request from per-lane requests: take the
 * maximum requested voltage swing and pre-emphasis across all lanes, clamp
 * both to their global maxima, cap pre-emphasis at the max allowed for the
 * chosen swing, then apply the result to every lane of max_lt_setting.
 * (POST_CURSOR2 handling is stubbed out in the commented code.) */
static void find_max_drive_settings(
	const struct link_training_settings *link_training_setting,
	struct link_training_settings *max_lt_setting)
{
	uint32_t lane;
	struct dc_lane_settings max_requested;

	max_requested.VOLTAGE_SWING =
		link_training_setting->
		lane_settings[0].VOLTAGE_SWING;
	max_requested.PRE_EMPHASIS =
		link_training_setting->
		lane_settings[0].PRE_EMPHASIS;
	/*max_requested.postCursor2 =
	 * link_training_setting->laneSettings[0].postCursor2;*/

	/* Determine what the maximum of the requested settings are*/
	for (lane = 1; lane < link_training_setting->link_settings.lane_count;
			lane++) {
		if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
			max_requested.VOLTAGE_SWING)

			max_requested.VOLTAGE_SWING =
			link_training_setting->
			lane_settings[lane].VOLTAGE_SWING;

		if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
				max_requested.PRE_EMPHASIS)
			max_requested.PRE_EMPHASIS =
			link_training_setting->
			lane_settings[lane].PRE_EMPHASIS;

		/*
		if (link_training_setting->laneSettings[lane].postCursor2 >
		 max_requested.postCursor2)
		{
		max_requested.postCursor2 =
		link_training_setting->laneSettings[lane].postCursor2;
		}
		*/
	}

	/* make sure the requested settings are
	 * not higher than maximum settings*/
	if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
		max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;

	if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
		max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
	/*
	if (max_requested.postCursor2 > PostCursor2_MaxLevel)
	max_requested.postCursor2 = PostCursor2_MaxLevel;
	*/

	/* make sure the pre-emphasis matches the voltage swing*/
	if (max_requested.PRE_EMPHASIS >
		get_max_pre_emphasis_for_voltage_swing(
			max_requested.VOLTAGE_SWING))
		max_requested.PRE_EMPHASIS =
		get_max_pre_emphasis_for_voltage_swing(
			max_requested.VOLTAGE_SWING);

	/*
	 * Post Cursor2 levels are completely independent from
	 * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
	 * can only be applied to each allowable combination of voltage
	 * swing and pre-emphasis levels */
	 /* if ( max_requested.postCursor2 >
	  *  getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
	  *  max_requested.postCursor2 =
	  *  getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
	  */

	max_lt_setting->link_settings.link_rate =
		link_training_setting->link_settings.link_rate;
	max_lt_setting->link_settings.lane_count =
	link_training_setting->link_settings.lane_count;
	max_lt_setting->link_settings.link_spread =
		link_training_setting->link_settings.link_spread;

	/* Apply the clamped maxima uniformly across all active lanes. */
	for (lane = 0; lane <
		link_training_setting->link_settings.lane_count;
		lane++) {
		max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
			max_requested.VOLTAGE_SWING;
		max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
			max_requested.PRE_EMPHASIS;
		/*max_lt_setting->laneSettings[lane].postCursor2 =
		 * max_requested.postCursor2;
		 */
	}

}
+
+/*
+ * Read training status from the sink and derive the next drive settings.
+ *
+ * Reads a 6-byte burst starting at DP_LANE0_1_STATUS: bytes 0-1 are the
+ * per-lane status nibbles, byte 2 is the lane-align status, and bytes
+ * 4-5 are the per-lane adjust requests.  The requested VS/PE values are
+ * then normalized so every lane carries the maximum requested level
+ * (find_max_drive_settings) before being returned in req_settings.
+ */
+static void get_lane_status_and_drive_settings(
+	struct dc_link *link,
+	const struct link_training_settings *link_training_setting,
+	union lane_status *ln_status,
+	union lane_align_status_updated *ln_status_updated,
+	struct link_training_settings *req_settings)
+{
+	uint8_t dpcd_buf[6] = {0};
+	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+	struct link_training_settings request_settings = {{0}};
+	uint32_t lane;
+
+	memset(req_settings, '\0', sizeof(struct link_training_settings));
+
+	core_link_read_dpcd(
+		link,
+		DP_LANE0_1_STATUS,
+		(uint8_t *)(dpcd_buf),
+		sizeof(dpcd_buf));
+
+	/* Two lanes share each status/adjust byte, one nibble per lane. */
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->link_settings.lane_count);
+		lane++) {
+
+		ln_status[lane].raw =
+			get_nibble_at_index(&dpcd_buf[0], lane);
+		dpcd_lane_adjust[lane].raw =
+			get_nibble_at_index(&dpcd_buf[4], lane);
+	}
+
+	ln_status_updated->raw = dpcd_buf[2];
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+		__func__,
+		DP_LANE0_1_STATUS, dpcd_buf[0],
+		DP_LANE2_3_STATUS, dpcd_buf[1]);
+
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+		__func__,
+		DP_ADJUST_REQUEST_LANE0_1,
+		dpcd_buf[4],
+		DP_ADJUST_REQUEST_LANE2_3,
+		dpcd_buf[5]);
+
+	/*copy to req_settings*/
+	request_settings.link_settings.lane_count =
+		link_training_setting->link_settings.lane_count;
+	request_settings.link_settings.link_rate =
+		link_training_setting->link_settings.link_rate;
+	request_settings.link_settings.link_spread =
+		link_training_setting->link_settings.link_spread;
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->link_settings.lane_count);
+		lane++) {
+
+		request_settings.lane_settings[lane].VOLTAGE_SWING =
+			(enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
+				VOLTAGE_SWING_LANE);
+		request_settings.lane_settings[lane].PRE_EMPHASIS =
+			(enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
+				PRE_EMPHASIS_LANE);
+	}
+
+	/*Note: for postcursor2, read adjusted
+	 * postcursor2 settings from*/
+	/*DpcdAddress_AdjustRequestPostCursor2 =
+	 *0x020C (not implemented yet)*/
+
+	/* we find the maximum of the requested settings across all lanes*/
+	/* and set this maximum for all lanes*/
+	find_max_drive_settings(&request_settings, req_settings);
+
+	/* if post cursor 2 is needed in the future,
+	 * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+	 */
+
+}
+
+/*
+ * Program the current per-lane VS/PE drive settings into the sink's
+ * DP_TRAINING_LANE0_SET..LANEx_SET registers (one byte per active lane),
+ * including the max-swing/max-pre-emphasis reached flags, and cache
+ * lane 0's setting in link->cur_lane_setting.
+ */
+static void dpcd_set_lane_settings(
+	struct dc_link *link,
+	const struct link_training_settings *link_training_setting)
+{
+	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+	uint32_t lane;
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->
+		link_settings.lane_count);
+		lane++) {
+		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+			(uint8_t)(link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING);
+		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+			(uint8_t)(link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS);
+		dpcd_lane[lane].bits.MAX_SWING_REACHED =
+			(link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING ==
+			VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+			(link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS ==
+			PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+	}
+
+	core_link_write_dpcd(link,
+		DP_TRAINING_LANE0_SET,
+		(uint8_t *)(dpcd_lane),
+		link_training_setting->link_settings.lane_count);
+
+	/*
+	if (LTSettings.link.rate == LinkRate_High2)
+	{
+		DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
+		for ( uint32_t lane = 0;
+		lane < lane_count_DPMax; lane++)
+		{
+			dpcd_lane2[lane].bits.post_cursor2_set =
+			static_cast<unsigned char>(
+			LTSettings.laneSettings[lane].postCursor2);
+			dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
+		}
+		m_pDpcdAccessSrv->WriteDpcdData(
+		DpcdAddress_Lane0Set2,
+		reinterpret_cast<unsigned char*>(dpcd_lane2),
+		LTSettings.link.lanes);
+	}
+	*/
+
+	/* Use adjacent string literals instead of a backslash line
+	 * continuation, which embedded the source indentation (tabs)
+	 * into the logged message. */
+	dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s\n %x VS set = %x  PE set = %x "
+		"max VS Reached = %x  max PE Reached = %x\n",
+		__func__,
+		DP_TRAINING_LANE0_SET,
+		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+		dpcd_lane[0].bits.MAX_SWING_REACHED,
+		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+	link->cur_lane_setting = link_training_setting->lane_settings[0];
+
+}
+
+/* Return true if any active lane is at the maximum voltage swing. */
+static bool is_max_vs_reached(
+	const struct link_training_settings *lt_settings)
+{
+	uint32_t ln;
+	uint32_t ln_count =
+		(uint32_t)(lt_settings->link_settings.lane_count);
+
+	for (ln = 0; ln < ln_count; ln++)
+		if (lt_settings->lane_settings[ln].VOLTAGE_SWING ==
+				VOLTAGE_SWING_MAX_LEVEL)
+			return true;
+
+	return false;
+}
+
+/*
+ * Apply new drive settings on both ends of the link, in order:
+ * program the ASIC PHY first, then mirror the settings to the sink
+ * over DPCD so source and sink stay in sync.
+ */
+void dc_link_dp_set_drive_settings(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings)
+{
+	/* program ASIC PHY settings*/
+	dp_set_hw_lane_settings(link, lt_settings);
+
+	/* Notify DP sink the PHY settings from source */
+	dpcd_set_lane_settings(link, lt_settings);
+}
+
+/*
+ * Handle the sink's post-link-training adjust request sequence.
+ *
+ * While the sink reports POST_LT_ADJ_REQ_IN_PROGRESS, keep re-reading
+ * lane status and requested drive settings, re-applying VS/PE whenever
+ * the request differs from the current settings.  Bounded by
+ * POST_LT_ADJ_REQ_LIMIT requests of POST_LT_ADJ_REQ_TIMEOUT polls each.
+ *
+ * Returns false only when CR or channel EQ is lost during the sequence;
+ * hitting the timeout or the request limit is logged (and asserted on)
+ * but treated as success.
+ */
+static bool perform_post_lt_adj_req_sequence(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings)
+{
+	enum dc_lane_count lane_count =
+	lt_settings->link_settings.lane_count;
+
+	uint32_t adj_req_count;
+	uint32_t adj_req_timer;
+	bool req_drv_setting_changed;
+	uint32_t lane;
+
+	req_drv_setting_changed = false;
+	for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+	adj_req_count++) {
+
+		req_drv_setting_changed = false;
+
+		for (adj_req_timer = 0;
+			adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+			adj_req_timer++) {
+
+			struct link_training_settings req_settings;
+			union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+			union lane_align_status_updated
+				dpcd_lane_status_updated;
+
+			get_lane_status_and_drive_settings(
+			link,
+			lt_settings,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated,
+			&req_settings);
+
+			/* Sink finished adjusting: sequence complete. */
+			if (dpcd_lane_status_updated.bits.
+					POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+				return true;
+
+			/* The link must stay fully trained while adjusting. */
+			if (!is_cr_done(lane_count, dpcd_lane_status))
+				return false;
+
+			if (!is_ch_eq_done(
+				lane_count,
+				dpcd_lane_status,
+				&dpcd_lane_status_updated))
+				return false;
+
+			for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+				if (lt_settings->
+				lane_settings[lane].VOLTAGE_SWING !=
+				req_settings.lane_settings[lane].
+				VOLTAGE_SWING ||
+				lt_settings->lane_settings[lane].PRE_EMPHASIS !=
+				req_settings.lane_settings[lane].PRE_EMPHASIS) {
+
+					req_drv_setting_changed = true;
+					break;
+				}
+			}
+
+			if (req_drv_setting_changed) {
+				update_drive_settings(
+					lt_settings, req_settings);
+
+				dc_link_dp_set_drive_settings(link,
+						lt_settings);
+				break;
+			}
+
+			msleep(1);
+		}
+
+		if (!req_drv_setting_changed) {
+			dm_logger_write(link->ctx->logger, LOG_WARNING,
+				"%s: Post Link Training Adjust Request Timed out\n",
+				__func__);
+
+			ASSERT(0);
+			return true;
+		}
+	}
+	dm_logger_write(link->ctx->logger, LOG_WARNING,
+		"%s: Post Link Training Adjust Request limit reached\n",
+		__func__);
+
+	ASSERT(0);
+	return true;
+
+}
+
+/*
+ * Pick the highest DP training pattern supported by both the link
+ * encoder and the sink's DPCD caps, defaulting to TPS2.
+ */
+static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
+{
+	struct encoder_feature_support *features = &link->link_enc->features;
+	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+	enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+
+	/* Source-side capability first... */
+	if (features->flags.bits.IS_TPS4_CAPABLE)
+		highest_tp = HW_DP_TRAINING_PATTERN_4;
+	else if (features->flags.bits.IS_TPS3_CAPABLE)
+		highest_tp = HW_DP_TRAINING_PATTERN_3;
+
+	/* ...then intersect with what the sink reports. */
+	if (highest_tp >= HW_DP_TRAINING_PATTERN_4 &&
+		dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED)
+		return HW_DP_TRAINING_PATTERN_4;
+
+	if (highest_tp >= HW_DP_TRAINING_PATTERN_3 &&
+		dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED)
+		return HW_DP_TRAINING_PATTERN_3;
+
+	return HW_DP_TRAINING_PATTERN_2;
+}
+
+/*
+ * Channel equalization phase of DP link training.
+ *
+ * Selects the highest mutually supported training pattern (TPS2/3/4),
+ * then loops up to LINK_TRAINING_MAX_RETRY_COUNT times: apply lane
+ * settings, update DPCD, wait the training AUX rd interval (400), and
+ * check CR/EQ status, taking new VS/PE from the sink's requests between
+ * iterations.  Fails with EQ_FAIL_CR if clock recovery is lost.
+ */
+static enum link_training_result perform_channel_equalization_sequence(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings)
+{
+	struct link_training_settings req_settings;
+	enum hw_dp_training_pattern hw_tr_pattern;
+	uint32_t retries_ch_eq;
+	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+	union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+
+	hw_tr_pattern = get_supported_tp(link);
+
+	dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+		retries_ch_eq++) {
+
+		dp_set_hw_lane_settings(link, lt_settings);
+
+		/* 2. update DPCD*/
+		if (!retries_ch_eq)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only for the 1-st iteration*/
+			dpcd_set_lt_pattern_and_lane_settings(
+				link,
+				lt_settings,
+				hw_tr_pattern);
+		else
+			dpcd_set_lane_settings(link, lt_settings);
+
+		/* 3. wait for receiver to lock-on*/
+		wait_for_training_aux_rd_interval(link, 400);
+
+		/* 4. Read lane status and requested
+		 * drive settings as set by the sink*/
+
+		get_lane_status_and_drive_settings(
+			link,
+			lt_settings,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated,
+			&req_settings);
+
+		/* 5. check CR done*/
+		if (!is_cr_done(lane_count, dpcd_lane_status))
+			return LINK_TRAINING_EQ_FAIL_CR;
+
+		/* 6. check CHEQ done*/
+		if (is_ch_eq_done(lane_count,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated))
+			return LINK_TRAINING_SUCCESS;
+
+		/* 7. update VS/PE/PC2 in lt_settings*/
+		update_drive_settings(lt_settings, req_settings);
+	}
+
+	return LINK_TRAINING_EQ_FAIL_EQ;
+
+}
+
+/*
+ * Clock recovery phase of DP link training (TPS1).
+ *
+ * Starts every lane at minimum VS/PE/PC2, then loops: apply lane
+ * settings, update DPCD, wait the training AUX rd interval (100), and
+ * poll CR status, following the sink's requested drive settings.
+ * Bounded both by LINK_TRAINING_MAX_CR_RETRY total tries and by
+ * LINK_TRAINING_MAX_RETRY_COUNT consecutive tries at an unchanged
+ * voltage swing (guards against hubs that toggle VS forever -- see
+ * note below).  Returns true once CR is done on all lanes.
+ */
+static bool perform_clock_recovery_sequence(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings)
+{
+	uint32_t retries_cr;
+	uint32_t retry_count;
+	uint32_t lane;
+	struct link_training_settings req_settings;
+	enum dc_lane_count lane_count =
+	lt_settings->link_settings.lane_count;
+	enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+	union lane_align_status_updated dpcd_lane_status_updated;
+
+	retries_cr = 0;
+	retry_count = 0;
+	/* initial drive setting (VS/PE/PC2)*/
+	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+		lt_settings->lane_settings[lane].VOLTAGE_SWING =
+		VOLTAGE_SWING_LEVEL0;
+		lt_settings->lane_settings[lane].PRE_EMPHASIS =
+		PRE_EMPHASIS_DISABLED;
+		lt_settings->lane_settings[lane].POST_CURSOR2 =
+		POST_CURSOR2_DISABLED;
+	}
+
+	dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	/* najeeb - The synaptics MST hub can put the LT in
+	* infinite loop by switching the VS
+	*/
+	/* between level 0 and level 1 continuously, here
+	* we try for CR lock for LinkTrainingMaxCRRetry count*/
+	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+	(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+		memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+		memset(&dpcd_lane_status_updated, '\0',
+		sizeof(dpcd_lane_status_updated));
+
+		/* 1. call HWSS to set lane settings*/
+		dp_set_hw_lane_settings(
+				link,
+				lt_settings);
+
+		/* 2. update DPCD of the receiver*/
+		if (!retries_cr)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only for the 1-st iteration.*/
+			dpcd_set_lt_pattern_and_lane_settings(
+					link,
+					lt_settings,
+					hw_tr_pattern);
+		else
+			dpcd_set_lane_settings(
+					link,
+					lt_settings);
+
+		/* 3. wait receiver to lock-on*/
+		wait_for_training_aux_rd_interval(
+				link,
+				100);
+
+		/* 4. Read lane status and requested drive
+		* settings as set by the sink
+		*/
+		get_lane_status_and_drive_settings(
+				link,
+				lt_settings,
+				dpcd_lane_status,
+				&dpcd_lane_status_updated,
+				&req_settings);
+
+		/* 5. check CR done*/
+		if (is_cr_done(lane_count, dpcd_lane_status))
+			return true;
+
+		/* 6. max VS reached*/
+		if (is_max_vs_reached(lt_settings))
+			return false;
+
+		/* 7. same voltage*/
+		/* Note: VS same for all lanes,
+		* so comparing first lane is sufficient*/
+		if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+			req_settings.lane_settings[0].VOLTAGE_SWING)
+			retries_cr++;
+		else
+			retries_cr = 0;
+
+		/* 8. update VS/PE/PC2 in lt_settings*/
+		update_drive_settings(lt_settings, req_settings);
+
+		retry_count++;
+	}
+
+	if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+		ASSERT(0);
+		/* Adjacent string literals instead of backslash line
+		 * continuations, which embedded the source indentation
+		 * (tabs) into the logged message. */
+		dm_logger_write(link->ctx->logger, LOG_ERROR,
+			"%s: Link Training Error, could not "
+			"get CR after %d tries. "
+			"Possibly voltage swing issue", __func__,
+			LINK_TRAINING_MAX_CR_RETRY);
+
+	}
+
+	return false;
+}
+
+/*
+ * Steps 3-5 of link training (steps 1-2 are the CR/EQ sequences in the
+ * caller): take the sink out of training (pattern -> video idle), put
+ * the main link on the idle pattern, then run the post-LT adjust
+ * sequence when the sink supports it and TPS4 is not in use.  Finally
+ * revokes POST_LT_ADJ_REQ_GRANTED via DP_LANE_COUNT_SET.
+ *
+ * Returns the incoming status, possibly downgraded to false if the
+ * post-LT adjust sequence failed.
+ */
+static inline bool perform_link_training_int(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings,
+	bool status)
+{
+	union lane_count_set lane_count_set = { {0} };
+	union dpcd_training_pattern dpcd_pattern = { {0} };
+
+	/* 3. set training not in progress*/
+	dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+	dpcd_set_training_pattern(link, dpcd_pattern);
+
+	/* 4. mainlink output idle pattern*/
+	dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+	/*
+	 * 5. post training adjust if required
+	 * If the upstream DPTX and downstream DPRX both support TPS4,
+	 * TPS4 must be used instead of POST_LT_ADJ_REQ.
+	 */
+	if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+			get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
+		return status;
+
+	if (status &&
+		perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+		status = false;
+
+	lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+	lane_count_set.bits.ENHANCED_FRAMING = 1;
+	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+	core_link_write_dpcd(
+		link,
+		DP_LANE_COUNT_SET,
+		&lane_count_set.raw,
+		sizeof(lane_count_set));
+
+	return status;
+}
+
+/*
+ * Run full DP link training at the given settings.
+ *
+ * Sequence: program rate/lane-count/spread to the sink, run the clock
+ * recovery sequence, then channel equalization, then the wrap-up in
+ * perform_link_training_int() (skipped on failure when
+ * skip_video_pattern is set).  The result is written to the
+ * connectivity log.
+ *
+ * Returns LINK_TRAINING_SUCCESS or the failure code of the phase that
+ * failed (CR_FAIL / EQ_FAIL_CR / EQ_FAIL_EQ).
+ */
+enum link_training_result dc_link_dp_perform_link_training(
+	struct dc_link *link,
+	const struct dc_link_settings *link_setting,
+	bool skip_video_pattern)
+{
+	enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+	char *link_rate = "Unknown";
+	struct link_training_settings lt_settings;
+
+	memset(&lt_settings, '\0', sizeof(lt_settings));
+
+	lt_settings.link_settings.link_rate = link_setting->link_rate;
+	lt_settings.link_settings.lane_count = link_setting->lane_count;
+
+	/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
+
+	/* TODO hard coded to SS for now
+	 * lt_settings.link_settings.link_spread =
+	 * dal_display_path_is_ss_supported(
+	 * path_mode->display_path) ?
+	 * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+	 * LINK_SPREAD_DISABLED;
+	 */
+	lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+	/* 1. set link rate, lane count and spread*/
+	dpcd_set_link_settings(link, &lt_settings);
+
+	/* 2. perform link training (set link training done
+	 *  to false is done as well)*/
+	if (!perform_clock_recovery_sequence(link, &lt_settings)) {
+		status = LINK_TRAINING_CR_FAIL;
+	} else {
+		status = perform_channel_equalization_sequence(link,
+				&lt_settings);
+	}
+
+	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+		if (!perform_link_training_int(link,
+				&lt_settings,
+				status == LINK_TRAINING_SUCCESS)) {
+			/* the next link training setting in this case
+			 * would be the same as CR failure case.
+			 */
+			status = LINK_TRAINING_CR_FAIL;
+		}
+	}
+
+	/* 6. print status message*/
+	switch (lt_settings.link_settings.link_rate) {
+
+	case LINK_RATE_LOW:
+		link_rate = "RBR";
+		break;
+	case LINK_RATE_HIGH:
+		link_rate = "HBR";
+		break;
+	case LINK_RATE_HIGH2:
+		link_rate = "HBR2";
+		break;
+	case LINK_RATE_RBR2:
+		link_rate = "RBR2";
+		break;
+	case LINK_RATE_HIGH3:
+		link_rate = "HBR3";
+		break;
+	default:
+		break;
+	}
+
+	/* Connectivity log: link training */
+	CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
+			link_rate,
+			lt_settings.link_settings.lane_count,
+			(status ==  LINK_TRAINING_SUCCESS) ? "pass" :
+			((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
+			"EQ failed"),
+			lt_settings.lane_settings[0].VOLTAGE_SWING,
+			lt_settings.lane_settings[0].PRE_EMPHASIS);
+
+	return status;
+}
+
+
+/*
+ * Retry dc_link_dp_perform_link_training() up to @attempts times, with
+ * a linearly increasing delay between tries.  Returns true as soon as
+ * one attempt succeeds, false after all attempts fail.
+ *
+ * The loop counter and delay are full-width integers: the previous
+ * uint8_t counter would wrap against an int @attempts > 255 (never
+ * terminating), and the uint8_t delay accumulator could overflow.
+ */
+bool perform_link_training_with_retries(
+	struct dc_link *link,
+	const struct dc_link_settings *link_setting,
+	bool skip_video_pattern,
+	int attempts)
+{
+	int j;
+	unsigned int delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+
+	for (j = 0; j < attempts; ++j) {
+
+		if (dc_link_dp_perform_link_training(
+				link,
+				link_setting,
+				skip_video_pattern) == LINK_TRAINING_SUCCESS)
+			return true;
+
+		msleep(delay_between_attempts);
+		delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+	}
+
+	return false;
+}
+
+/*
+ * Build the highest link settings this ASIC can drive, then clamp each
+ * field to what the sink reported during capability detection.
+ */
+static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+{
+	const struct dc_link_settings *reported = &link->reported_link_cap;
+	struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+			LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+
+	/* Raise the default rate for HBR2/HBR3 capable encoders. */
+	if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_HIGH3;
+	else if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
+		max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+	/* Never exceed the sink's reported capability. */
+	if (reported->lane_count < max_link_cap.lane_count)
+		max_link_cap.lane_count = reported->lane_count;
+	if (reported->link_rate < max_link_cap.link_rate)
+		max_link_cap.link_rate = reported->link_rate;
+	if (reported->link_spread < max_link_cap.link_spread)
+		max_link_cap.link_spread = reported->link_spread;
+
+	return max_link_cap;
+}
+
+/*
+ * Determine the physical link capability by actually training the link,
+ * falling back from the highest common setting (sink cap vs ASIC cap)
+ * until training succeeds.  The result is stored in
+ * link->verified_link_cap; if every setting fails, a 1-lane RBR
+ * fail-safe is recorded.  The PHY is left disabled on return.
+ *
+ * Returns true if any setting trained successfully.
+ */
+bool dp_hbr_verify_link_cap(
+	struct dc_link *link,
+	struct dc_link_settings *known_limit_link_setting)
+{
+	struct dc_link_settings max_link_cap = {0};
+	struct dc_link_settings cur_link_setting = {0};
+	struct dc_link_settings *cur = &cur_link_setting;
+	struct dc_link_settings initial_link_settings = {0};
+	bool success;
+	bool skip_link_training;
+	bool skip_video_pattern;
+	struct clock_source *dp_cs;
+	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+	/* Initialized so the loop condition below never reads an
+	 * indeterminate value if training is ever skipped. */
+	enum link_training_result status = LINK_TRAINING_CR_FAIL;
+
+	success = false;
+	skip_link_training = false;
+
+	max_link_cap = get_max_link_cap(link);
+
+	/* TODO implement override and monitor patch later */
+
+	/* try to train the link from high to low to
+	 * find the physical link capability
+	 */
+	/* disable PHY done possible by BIOS, will be done by driver itself */
+	dp_disable_link_phy(link, link->connector_signal);
+
+	dp_cs = link->dc->res_pool->dp_clock_source;
+
+	if (dp_cs)
+		dp_cs_id = dp_cs->id;
+	else {
+		/*
+		 * dp clock source is not initialized for some reason.
+		 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
+		 */
+		ASSERT(dp_cs);
+	}
+
+	/* link training starts with the maximum common settings
+	 * supported by both sink and ASIC.
+	 */
+	initial_link_settings = get_common_supported_link_settings(
+			*known_limit_link_setting,
+			max_link_cap);
+	cur_link_setting = initial_link_settings;
+	do {
+		skip_video_pattern = true;
+
+		if (cur->link_rate == LINK_RATE_LOW)
+			skip_video_pattern = false;
+
+		dp_enable_link_phy(
+				link,
+				link->connector_signal,
+				dp_cs_id,
+				cur);
+
+		if (skip_link_training)
+			success = true;
+		else {
+			status = dc_link_dp_perform_link_training(
+							link,
+							cur,
+							skip_video_pattern);
+			if (status == LINK_TRAINING_SUCCESS)
+				success = true;
+		}
+
+		if (success)
+			link->verified_link_cap = *cur;
+
+		/* always disable the link before trying another
+		 * setting or before returning we'll enable it later
+		 * based on the actual mode we're driving
+		 */
+		dp_disable_link_phy(link, link->connector_signal);
+	} while (!success && decide_fallback_link_setting(
+			initial_link_settings, cur, status));
+
+	/* Link Training failed for all Link Settings
+	 *  (Lane Count is still unknown)
+	 */
+	if (!success) {
+		/* If all LT fails for all settings,
+		 * set verified = failed safe (1 lane low)
+		 */
+		link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+		link->verified_link_cap.link_rate = LINK_RATE_LOW;
+
+		link->verified_link_cap.link_spread =
+		LINK_SPREAD_DISABLED;
+	}
+
+
+	return success;
+}
+
+/*
+ * Intersect two link settings: take the lower lane count and link rate
+ * of the pair (link spread forced to disabled), then snap any
+ * nonstandard rate down to the nearest supported one.
+ */
+static struct dc_link_settings get_common_supported_link_settings(
+		struct dc_link_settings link_setting_a,
+		struct dc_link_settings link_setting_b)
+{
+	struct dc_link_settings link_settings = {0};
+
+	link_settings.lane_count =
+		(link_setting_a.lane_count <=
+			link_setting_b.lane_count) ?
+			link_setting_a.lane_count :
+			link_setting_b.lane_count;
+	link_settings.link_rate =
+		(link_setting_a.link_rate <=
+			link_setting_b.link_rate) ?
+			link_setting_a.link_rate :
+			link_setting_b.link_rate;
+	link_settings.link_spread = LINK_SPREAD_DISABLED;
+
+	/* in DP compliance test, DPR-120 may have
+	 * a random value in its MAX_LINK_BW dpcd field.
+	 * We map it to the maximum supported link rate that
+	 * is smaller than MAX_LINK_BW in this case.
+	 */
+	if (link_settings.link_rate > LINK_RATE_HIGH3) {
+		link_settings.link_rate = LINK_RATE_HIGH3;
+	} else if (link_settings.link_rate < LINK_RATE_HIGH3
+			&& link_settings.link_rate > LINK_RATE_HIGH2) {
+		link_settings.link_rate = LINK_RATE_HIGH2;
+	} else if (link_settings.link_rate < LINK_RATE_HIGH2
+			&& link_settings.link_rate > LINK_RATE_HIGH) {
+		link_settings.link_rate = LINK_RATE_HIGH;
+	} else if (link_settings.link_rate < LINK_RATE_HIGH
+			&& link_settings.link_rate > LINK_RATE_LOW) {
+		link_settings.link_rate = LINK_RATE_LOW;
+	} else if (link_settings.link_rate < LINK_RATE_LOW) {
+		link_settings.link_rate = LINK_RATE_UNKNOWN;
+	}
+
+	return link_settings;
+}
+
+/* True once the lane count cannot be reduced any further. */
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+	return !(lane_count > LANE_COUNT_ONE);
+}
+
+/* True once the link rate cannot be reduced any further. */
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+	return !(link_rate > LINK_RATE_LOW);
+}
+
+/* Step down to the next lower DP lane count; UNKNOWN once exhausted. */
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+	if (lane_count == LANE_COUNT_FOUR)
+		return LANE_COUNT_TWO;
+	if (lane_count == LANE_COUNT_TWO)
+		return LANE_COUNT_ONE;
+	return LANE_COUNT_UNKNOWN;
+}
+
+/* Step down to the next lower DP link rate; UNKNOWN once exhausted. */
+static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
+{
+	if (link_rate == LINK_RATE_HIGH3)
+		return LINK_RATE_HIGH2;
+	if (link_rate == LINK_RATE_HIGH2)
+		return LINK_RATE_HIGH;
+	if (link_rate == LINK_RATE_HIGH)
+		return LINK_RATE_LOW;
+	return LINK_RATE_UNKNOWN;
+}
+
+/* Step up to the next higher DP lane count; UNKNOWN past four lanes. */
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+	if (lane_count == LANE_COUNT_ONE)
+		return LANE_COUNT_TWO;
+	if (lane_count == LANE_COUNT_TWO)
+		return LANE_COUNT_FOUR;
+	return LANE_COUNT_UNKNOWN;
+}
+
+/* Step up to the next higher DP link rate; UNKNOWN past HBR3. */
+static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
+{
+	if (link_rate == LINK_RATE_LOW)
+		return LINK_RATE_HIGH;
+	if (link_rate == LINK_RATE_HIGH)
+		return LINK_RATE_HIGH2;
+	if (link_rate == LINK_RATE_HIGH2)
+		return LINK_RATE_HIGH3;
+	return LINK_RATE_UNKNOWN;
+}
+
+/*
+ * Produce the next fallback link setting from the current setting and
+ * the last link-training failure mode.
+ *
+ * Returns true when a further fallback setting was produced, false once
+ * the minimum setting has been reached (or on an unexpected result) and
+ * no further fallback is possible.
+ */
+static bool decide_fallback_link_setting(
+		struct dc_link_settings initial_link_settings,
+		struct dc_link_settings *current_link_setting,
+		enum link_training_result training_result)
+{
+	if (current_link_setting == NULL)
+		return false;
+
+	switch (training_result) {
+	case LINK_TRAINING_CR_FAIL:
+		/* CR failed: drop the rate first; once at minimum rate,
+		 * restore the initial rate and drop the lane count.
+		 */
+		if (!reached_minimum_link_rate(
+				current_link_setting->link_rate)) {
+			current_link_setting->link_rate = reduce_link_rate(
+					current_link_setting->link_rate);
+		} else if (!reached_minimum_lane_count(
+				current_link_setting->lane_count)) {
+			current_link_setting->link_rate =
+					initial_link_settings.link_rate;
+			current_link_setting->lane_count = reduce_lane_count(
+					current_link_setting->lane_count);
+		} else {
+			return false;
+		}
+		break;
+	case LINK_TRAINING_EQ_FAIL_EQ:
+		/* EQ failed: drop the lane count first, then the rate. */
+		if (!reached_minimum_lane_count(
+				current_link_setting->lane_count)) {
+			current_link_setting->lane_count = reduce_lane_count(
+					current_link_setting->lane_count);
+		} else if (!reached_minimum_link_rate(
+				current_link_setting->link_rate)) {
+			current_link_setting->link_rate = reduce_link_rate(
+					current_link_setting->link_rate);
+		} else {
+			return false;
+		}
+		break;
+	case LINK_TRAINING_EQ_FAIL_CR:
+		/* CR was lost during EQ: only a lower rate can help. */
+		if (!reached_minimum_link_rate(
+				current_link_setting->link_rate)) {
+			current_link_setting->link_rate = reduce_link_rate(
+					current_link_setting->link_rate);
+		} else {
+			return false;
+		}
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Required stream bandwidth in kbps for @timing:
+ * pixel clock (kHz) * bits per channel * number of channels
+ * (3 channels unless the Y_ONLY flag is set).
+ */
+static uint32_t bandwidth_in_kbps_from_timing(
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t bits_per_channel = 0;
+	uint32_t kbps;
+	switch (timing->display_color_depth) {
+
+	case COLOR_DEPTH_666:
+		bits_per_channel = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bits_per_channel = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bits_per_channel = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bits_per_channel = 12;
+		break;
+	case COLOR_DEPTH_141414:
+		bits_per_channel = 14;
+		break;
+	case COLOR_DEPTH_161616:
+		bits_per_channel = 16;
+		break;
+	default:
+		/* NOTE(review): an unsupported depth leaves
+		 * bits_per_channel at 0, so after the ASSERT below the
+		 * function returns 0 kbps -- confirm callers never pass
+		 * unsupported depths, or that 0 is the intended result. */
+		break;
+	}
+	ASSERT(bits_per_channel != 0);
+
+	kbps = timing->pix_clk_khz;
+	kbps *= bits_per_channel;
+
+	if (timing->flags.Y_ONLY != 1)
+		/* Non-Y-only formats carry three channels, so they need
+		 * three times the bandwidth of a Y-only stream. */
+		kbps *= 3;
+
+	return kbps;
+
+}
+
+/*
+ * Total link bandwidth in kbps for the given settings:
+ * per-lane rate (link_rate * LINK_RATE_REF_FREQ_IN_KHZ) times the lane
+ * count, times 8 bits per byte.
+ */
+static uint32_t bandwidth_in_kbps_from_link_settings(
+	const struct dc_link_settings *link_setting)
+{
+	uint32_t total_kbps = link_setting->link_rate *
+		LINK_RATE_REF_FREQ_IN_KHZ;
+
+	total_kbps *= (uint32_t)(link_setting->lane_count);
+	total_kbps *= 8;   /* 8 bits per byte*/
+
+	return total_kbps;
+}
+
+/*
+ * Check whether @timing fits within the bandwidth of the link's
+ * verified capability.  The DP fail-safe mode (640x480 @ 25.175 MHz)
+ * is always accepted.
+ */
+bool dp_validate_mode_timing(
+	struct dc_link *link,
+	const struct dc_crtc_timing *timing)
+{
+	const struct dc_link_settings *link_setting;
+	uint32_t req_bw;
+	uint32_t max_bw;
+
+	/* always allow the DP fail safe mode */
+	if (timing->pix_clk_khz == (uint32_t)25175 &&
+		timing->h_addressable == (uint32_t)640 &&
+		timing->v_addressable == (uint32_t)480)
+		return true;
+
+	/* We always use verified link settings.
+	 * TODO: DYNAMIC_VALIDATION would pick the link setting based on
+	 * current load; not implemented yet.
+	 */
+	link_setting = &link->verified_link_cap;
+
+	req_bw = bandwidth_in_kbps_from_timing(timing);
+	max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+	/* NOTE: with DYNAMIC_VALIDATION the biggest passing mode would
+	 * be remembered here (max_req_bw_for_verified_linkcap) so upper
+	 * layers need not re-enumerate modes -- not implemented yet.
+	 */
+	return req_bw <= max_bw;
+}
+
+/*
+ * Choose the link settings used to drive @stream.
+ *
+ * Priority: 1) the preferred setting, if fully specified; 2) the
+ * verified cap for MST links (no MST-specific training routine yet);
+ * otherwise 3) the minimum verified setting whose bandwidth covers the
+ * stream timing, increasing lane count before stepping the rate up.
+ * Falls back to the full verified cap (with an assert) if nothing fits.
+ */
+void decide_link_settings(struct dc_stream_state *stream,
+	struct dc_link_settings *link_setting)
+{
+
+	struct dc_link_settings initial_link_setting = {
+		LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+	struct dc_link_settings current_link_setting =
+			initial_link_setting;
+	struct dc_link *link;
+	uint32_t req_bw;
+	uint32_t link_bw;
+
+	req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
+
+	link = stream->sink->link;
+
+	/* if preferred is specified through AMDDP, use it, if it's enough
+	 * to drive the mode
+	 */
+	if (link->preferred_link_setting.lane_count !=
+			LANE_COUNT_UNKNOWN &&
+			link->preferred_link_setting.link_rate !=
+					LINK_RATE_UNKNOWN) {
+		*link_setting =  link->preferred_link_setting;
+		return;
+	}
+
+	/* MST doesn't perform link training for now
+	 * TODO: add MST specific link training routine
+	 */
+	if (is_mst_supported(link)) {
+		*link_setting = link->verified_link_cap;
+		return;
+	}
+
+	/* search for the minimum link setting that:
+	 * 1. is supported according to the link training result
+	 * 2. could support the b/w requested by the timing
+	 */
+	while (current_link_setting.link_rate <=
+			link->verified_link_cap.link_rate) {
+		link_bw = bandwidth_in_kbps_from_link_settings(
+				&current_link_setting);
+		if (req_bw <= link_bw) {
+			*link_setting = current_link_setting;
+			return;
+		}
+
+		if (current_link_setting.lane_count <
+				link->verified_link_cap.lane_count) {
+			current_link_setting.lane_count =
+					increase_lane_count(
+							current_link_setting.lane_count);
+		} else {
+			current_link_setting.link_rate =
+					increase_link_rate(
+							current_link_setting.link_rate);
+			current_link_setting.lane_count =
+					initial_link_setting.lane_count;
+		}
+	}
+
+	/* No verified setting can carry this timing. */
+	BREAK_TO_DEBUGGER();
+	ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
+
+	*link_setting = link->verified_link_cap;
+}
+
+/*************************Short Pulse IRQ***************************/
+
+/*
+ * On an HPD short pulse, decide whether the trained link has been lost.
+ *
+ * Returns true (link lost, retrain needed) when the sink is in power
+ * state D0 and either any active lane has dropped CR / channel EQ /
+ * symbol lock, or interlane alignment is no longer done.  Returns false
+ * when the link was never configured (lane_count == 0) or the sink is
+ * not in D0.
+ */
+static bool hpd_rx_irq_check_link_loss_status(
+	struct dc_link *link,
+	union hpd_irq_data *hpd_irq_dpcd_data)
+{
+	uint8_t irq_reg_rx_power_state;
+	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+	union lane_status lane_status;
+	uint32_t lane;
+	bool sink_status_changed;
+	bool return_code;
+
+	sink_status_changed = false;
+	return_code = false;
+
+	if (link->cur_link_settings.lane_count == 0)
+		return return_code;
+	/*1. Check that we can handle interrupt: Not in FS DOS,
+	 *  Not in "Display Timeout" state, Link is trained.
+	 */
+
+	dpcd_result = core_link_read_dpcd(link,
+		DP_SET_POWER,
+		&irq_reg_rx_power_state,
+		sizeof(irq_reg_rx_power_state));
+
+	if (dpcd_result != DC_OK) {
+		/* On read failure assume D0 so the status check below
+		 * still runs. */
+		irq_reg_rx_power_state = DP_SET_POWER_D0;
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: DPCD read failed to obtain power state.\n",
+			__func__);
+	}
+
+	if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
+
+		/*2. Check that Link Status changed, before re-training.*/
+
+		/*parse lane status*/
+		for (lane = 0;
+			lane < link->cur_link_settings.lane_count;
+			lane++) {
+
+			/* check status of lanes 0,1
+			 * changed DpcdAddress_Lane01Status (0x202)*/
+			lane_status.raw = get_nibble_at_index(
+				&hpd_irq_dpcd_data->bytes.lane01_status.raw,
+				lane);
+
+			if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+				!lane_status.bits.CR_DONE_0 ||
+				!lane_status.bits.SYMBOL_LOCKED_0) {
+				/* if one of the channel equalization, clock
+				 * recovery or symbol lock is dropped
+				 * consider it as (link has been
+				 * dropped) dp sink status has changed*/
+				sink_status_changed = true;
+				break;
+			}
+
+		}
+
+		/* Check interlane align.*/
+		if (sink_status_changed ||
+			!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
+			INTERLANE_ALIGN_DONE) {
+
+			dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+				"%s: Link Status changed.\n",
+				__func__);
+
+			return_code = true;
+		}
+	}
+
+	return return_code;
+}
+
+/*
+ * Fetch the HPD IRQ DPCD block starting at DP_SINK_COUNT (200h).
+ *
+ * The HW reads 16 bytes from 200h on HPD, but it cannot retry after an
+ * AUX_DEFER, which makes CTS tests 4.3.2.1 - 3.2.4 fail.  Read exactly
+ * the 6 bytes (sizeof(union hpd_irq_data)) those tests require instead.
+ */
+static enum dc_status read_hpd_rx_irq_data(
+	struct dc_link *link,
+	union hpd_irq_data *irq_data)
+{
+	return core_link_read_dpcd(link, DP_SINK_COUNT, irq_data->raw,
+			sizeof(union hpd_irq_data));
+}
+
+static bool allow_hpd_rx_irq(const struct dc_link *link)
+{
+	/*
+	 * RX IRQs are serviced only when at least one of these holds:
+	 * 1) a link is established (lane count is known),
+	 * 2) MST detection has been kicked off, or
+	 * 3) an active dongle is attached.
+	 */
+	return link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN ||
+			link->type == dc_connection_mst_branch ||
+			is_dp_active_dongle(link);
+}
+
+/*
+ * Service an HPD IRQ on a PSR-enabled sink.
+ *
+ * Returns true when the IRQ is fully handled here: either a PSR error
+ * (CRC / RFB storage) was recovered by toggling PSR, or PSR is active
+ * from the sink RFB so the main link may legitimately be down and no
+ * link-loss handling is needed.  Returns false otherwise.
+ *
+ * Note: DPCD addresses are written consistently in hex; the original
+ * mixed decimal 368 (= 0x170) and decimal 8198 (= 0x2006, the very
+ * register read just above) with hex, obscuring that the error-status
+ * write-back targets the same register that was read.
+ */
+static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+{
+	union dpcd_psr_configuration psr_configuration;
+
+	if (!link->psr_enabled)
+		return false;
+
+	dm_helpers_dp_read_dpcd(
+		link->ctx,
+		link,
+		0x170,/*DpcdAddress_PSR_Enable_Cfg (368)*/
+		&psr_configuration.raw,
+		sizeof(psr_configuration.raw));
+
+
+	if (psr_configuration.bits.ENABLE) {
+		unsigned char dpcdbuf[3] = {0};
+		union psr_error_status psr_error_status;
+		union psr_sink_psr_status psr_sink_psr_status;
+
+		/* One burst read covers DPCD 2006h..2008h. */
+		dm_helpers_dp_read_dpcd(
+			link->ctx,
+			link,
+			0x2006, /*DpcdAddress_PSR_Error_Status*/
+			(unsigned char *) dpcdbuf,
+			sizeof(dpcdbuf));
+
+		/*DPCD 2006h   ERROR STATUS*/
+		psr_error_status.raw = dpcdbuf[0];
+		/*DPCD 2008h   SINK PANEL SELF REFRESH STATUS*/
+		psr_sink_psr_status.raw = dpcdbuf[2];
+
+		if (psr_error_status.bits.LINK_CRC_ERROR ||
+				psr_error_status.bits.RFB_STORAGE_ERROR) {
+			/* Acknowledge and clear error bits (write back to
+			 * the same 2006h register that was read above) */
+			dm_helpers_dp_write_dpcd(
+				link->ctx,
+				link,
+				0x2006,/*DpcdAddress_PSR_Error_Status*/
+				&psr_error_status.raw,
+				sizeof(psr_error_status.raw));
+
+			/* PSR error, disable and re-enable PSR */
+			dc_link_set_psr_enable(link, false, true);
+			dc_link_set_psr_enable(link, true, true);
+
+			return true;
+		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+				PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+			/* No error is detect, PSR is active.
+			 * We should return with IRQ_HPD handled without
+			 * checking for loss of sync since PSR would have
+			 * powered down main link.
+			 */
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Retrain the link with the lane count / link rate requested by DP test
+ * automation (DPCD TEST_LANE_COUNT / TEST_LINK_RATE), and record that
+ * request as the link's preferred (verified) settings.
+ *
+ * NOTE(review): both DPCD reads are unchecked - on AUX failure
+ * link_settings stays zero-initialized and the retrain would use that;
+ * confirm this is acceptable for compliance flows.
+ */
+static void dp_test_send_link_training(struct dc_link *link)
+{
+	struct dc_link_settings link_settings = {0};
+
+	core_link_read_dpcd(
+			link,
+			DP_TEST_LANE_COUNT,
+			(unsigned char *)(&link_settings.lane_count),
+			1);
+	core_link_read_dpcd(
+			link,
+			DP_TEST_LINK_RATE,
+			(unsigned char *)(&link_settings.link_rate),
+			1);
+
+	/* Set preferred link settings */
+	link->verified_link_cap.lane_count = link_settings.lane_count;
+	link->verified_link_cap.link_rate = link_settings.link_rate;
+
+	dp_retrain_link_dp_test(link, &link_settings, false);
+}
+
+/* TODO hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This causes intermittent PHY automation failure
+ * Need to look into the root cause */
+static uint8_t force_tps4_for_cp2520 = 1;
+
+/*
+ * Service a PHY-layer test-pattern request (DPCD TEST_PHY_PATTERN).
+ *
+ * Reads the requested pattern plus per-lane voltage swing, pre-emphasis
+ * and post-cursor2 adjustments from the sink, translates them into DC
+ * test-pattern and lane settings, and forwards the whole request to
+ * dc_link_dp_set_test_pattern().  While force_tps4_for_cp2520 is set,
+ * CP2520 pattern 1/2 requests are served with TPS4 instead (see TODO
+ * above).
+ */
+static void dp_test_send_phy_test_pattern(struct dc_link *link)
+{
+	union phy_test_pattern dpcd_test_pattern;
+	union lane_adjust dpcd_lane_adjustment[2];
+	unsigned char dpcd_post_cursor_2_adjustment = 0;
+	unsigned char test_80_bit_pattern[
+			(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+			DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+	enum dp_test_pattern test_pattern;
+	struct dc_link_training_settings link_settings;
+	union lane_adjust dpcd_lane_adjust;
+	unsigned int lane;
+	struct link_training_settings link_training_settings;
+	int i = 0;
+
+	dpcd_test_pattern.raw = 0;
+	memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+	memset(&link_settings, 0, sizeof(link_settings));
+
+	/* get phy test pattern and pattern parameters from DP receiver */
+	core_link_read_dpcd(
+			link,
+			DP_TEST_PHY_PATTERN,
+			&dpcd_test_pattern.raw,
+			sizeof(dpcd_test_pattern));
+	core_link_read_dpcd(
+			link,
+			DP_ADJUST_REQUEST_LANE0_1,
+			&dpcd_lane_adjustment[0].raw,
+			sizeof(dpcd_lane_adjustment));
+
+	/*get post cursor 2 parameters
+	 * For DP 1.1a or earlier, this DPCD register's value is 0
+	 * For DP 1.2 or later:
+	 * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+	 * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+	 */
+	core_link_read_dpcd(
+			link,
+			DP_ADJUST_REQUEST_POST_CURSOR2,
+			&dpcd_post_cursor_2_adjustment,
+			sizeof(dpcd_post_cursor_2_adjustment));
+
+	/* translate request */
+	switch (dpcd_test_pattern.bits.PATTERN) {
+	case PHY_TEST_PATTERN_D10_2:
+		test_pattern = DP_TEST_PATTERN_D102;
+		break;
+	case PHY_TEST_PATTERN_SYMBOL_ERROR:
+		test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+		break;
+	case PHY_TEST_PATTERN_PRBS7:
+		test_pattern = DP_TEST_PATTERN_PRBS7;
+		break;
+	case PHY_TEST_PATTERN_80BIT_CUSTOM:
+		test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+		break;
+	case PHY_TEST_PATTERN_CP2520_1:
+		/* CP2520 pattern is unstable, temporarily use TPS4 instead */
+		test_pattern = (force_tps4_for_cp2520 == 1) ?
+				DP_TEST_PATTERN_TRAINING_PATTERN4 :
+				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+		break;
+	case PHY_TEST_PATTERN_CP2520_2:
+		/* CP2520 pattern is unstable, temporarily use TPS4 instead */
+		test_pattern = (force_tps4_for_cp2520 == 1) ?
+				DP_TEST_PATTERN_TRAINING_PATTERN4 :
+				DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+		break;
+	case PHY_TEST_PATTERN_CP2520_3:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+		break;
+	default:
+		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+	break;
+	}
+
+	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
+		core_link_read_dpcd(
+				link,
+				DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+				test_80_bit_pattern,
+				sizeof(test_80_bit_pattern));
+
+	/* prepare link training settings */
+	link_settings.link = link->cur_link_settings;
+
+	/* decode per-lane adjust requests: one nibble per lane */
+	for (lane = 0; lane <
+		(unsigned int)(link->cur_link_settings.lane_count);
+		lane++) {
+		dpcd_lane_adjust.raw =
+			get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+		link_settings.lane_settings[lane].VOLTAGE_SWING =
+			(enum dc_voltage_swing)
+			(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+		link_settings.lane_settings[lane].PRE_EMPHASIS =
+			(enum dc_pre_emphasis)
+			(dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+		link_settings.lane_settings[lane].POST_CURSOR2 =
+			(enum dc_post_cursor2)
+			((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+	}
+
+	for (i = 0; i < 4; i++)
+		link_training_settings.lane_settings[i] =
+				link_settings.lane_settings[i];
+	link_training_settings.link_settings = link_settings.link;
+	link_training_settings.allow_invalid_msa_timing_param = false;
+	/*Usage: Measure DP physical lane signal
+	 * by DP SI test equipment automatically.
+	 * PHY test pattern request is generated by equipment via HPD interrupt.
+	 * HPD needs to be active all the time. HPD should be active
+	 * all the time. Do not touch it.
+	 * forward request to DS
+	 */
+	dc_link_dp_set_test_pattern(
+		link,
+		test_pattern,
+		&link_training_settings,
+		test_80_bit_pattern,
+		(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+		DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
+}
+
+/*
+ * Service a video (link-layer) test-pattern request: translate DPCD
+ * TEST_PATTERN / TEST_MISC0 into a DC test pattern and program it.
+ */
+static void dp_test_send_link_test_pattern(struct dc_link *link)
+{
+	union link_test_pattern dpcd_test_pattern;
+	union test_misc dpcd_test_params;
+	enum dp_test_pattern test_pattern;
+
+	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+	/* get link test pattern and pattern parameters */
+	core_link_read_dpcd(link, DP_TEST_PATTERN,
+			&dpcd_test_pattern.raw, sizeof(dpcd_test_pattern));
+	core_link_read_dpcd(link, DP_TEST_MISC0,
+			&dpcd_test_params.raw, sizeof(dpcd_test_params));
+
+	switch (dpcd_test_pattern.bits.PATTERN) {
+	case LINK_TEST_PATTERN_COLOR_RAMP:
+		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+		break;
+	case LINK_TEST_PATTERN_VERTICAL_BARS:
+		/* black and white */
+		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+		break;
+	case LINK_TEST_PATTERN_COLOR_SQUARES:
+		/* dynamic range selects between the VESA and CEA variant */
+		if (dpcd_test_params.bits.DYN_RANGE == TEST_DYN_RANGE_VESA)
+			test_pattern = DP_TEST_PATTERN_COLOR_SQUARES;
+		else
+			test_pattern = DP_TEST_PATTERN_COLOR_SQUARES_CEA;
+		break;
+	default:
+		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+		break;
+	}
+
+	dc_link_dp_set_test_pattern(link, test_pattern, NULL, NULL, 0);
+}
+
+/*
+ * Service DPCD TEST_REQUEST: run each requested automation (link
+ * training, link test pattern, PHY test pattern) and acknowledge via
+ * TEST_RESPONSE.  An empty request is ACKed too (TODO: also revert any
+ * active test signals in that case).
+ */
+static void handle_automated_test(struct dc_link *link)
+{
+	union test_request test_request;
+	union test_response test_response;
+
+	memset(&test_request, 0, sizeof(test_request));
+	memset(&test_response, 0, sizeof(test_response));
+
+	core_link_read_dpcd(
+		link,
+		DP_TEST_REQUEST,
+		&test_request.raw,
+		sizeof(union test_request));
+	if (test_request.bits.LINK_TRAINING) {
+		/* ACK first to let DP RX test box monitor LT sequence */
+		test_response.bits.ACK = 1;
+		core_link_write_dpcd(
+			link,
+			DP_TEST_RESPONSE,
+			&test_response.raw,
+			sizeof(test_response));
+		dp_test_send_link_training(link);
+		/* no acknowledge request is needed again */
+		test_response.bits.ACK = 0;
+	}
+	if (test_request.bits.LINK_TEST_PATTRN) {
+		dp_test_send_link_test_pattern(link);
+		test_response.bits.ACK = 1;
+	}
+	if (test_request.bits.PHY_TEST_PATTERN) {
+		dp_test_send_phy_test_pattern(link);
+		test_response.bits.ACK = 1;
+	}
+	if (!test_request.raw)
+		/* no requests, revert all test signals
+		 * TODO: revert all test signals
+		 */
+		test_response.bits.ACK = 1;
+	/* send request acknowledgment */
+	if (test_response.bits.ACK)
+		core_link_write_dpcd(
+			link,
+			DP_TEST_RESPONSE,
+			&test_response.raw,
+			sizeof(test_response));
+}
+
+/*
+ * Handle a short-pulse (RX) HPD interrupt on @link.
+ *
+ * Reads the HPD IRQ DPCD block (copied to @out_hpd_irq_dpcd_data when
+ * non-NULL), dispatches automated-test and PSR handling, retrains on
+ * link loss, and returns true when the DM must run full sink detection
+ * (MST up-request pending, or an active dongle whose sink count
+ * changed).
+ *
+ * Fix: @result is an enum dc_status but was initialized with
+ * DDC_RESULT_UNKNOWN, a constant from the unrelated DDC result enum;
+ * use DC_ERROR_UNEXPECTED (matching hpd_rx_irq_check_link_loss_status).
+ */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+{
+	union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+	union device_service_irq device_service_clear = { { 0 } };
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	bool status = false;
+	/* For use cases related to down stream connection status change,
+	 * PSR and device auto test, refer to function handle_sst_hpd_irq
+	 * in DAL2.1*/
+
+	dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+		"%s: Got short pulse HPD on link %d\n",
+		__func__, link->link_index);
+
+
+	 /* All the "handle_hpd_irq_xxx()" methods
+		 * should be called only after
+		 * dal_dpsst_ls_read_hpd_irq_data
+		 * Order of calls is important too
+		 */
+	result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+	if (out_hpd_irq_dpcd_data)
+		*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
+
+	if (result != DC_OK) {
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: DPCD read failed to obtain irq data\n",
+			__func__);
+		return false;
+	}
+
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+		device_service_clear.bits.AUTOMATED_TEST = 1;
+		core_link_write_dpcd(
+			link,
+			DP_DEVICE_SERVICE_IRQ_VECTOR,
+			&device_service_clear.raw,
+			sizeof(device_service_clear.raw));
+		device_service_clear.raw = 0;
+		handle_automated_test(link);
+		return false;
+	}
+
+	if (!allow_hpd_rx_irq(link)) {
+		dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+			"%s: skipping HPD handling on %d\n",
+			__func__, link->link_index);
+		return false;
+	}
+
+	if (handle_hpd_irq_psr_sink(link))
+		/* PSR-related error was detected and handled */
+		return true;
+
+	/* If PSR-related error handled, Main link may be off,
+	 * so do not handle as a normal sink status change interrupt.
+	 */
+
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+		return true;
+
+	/* check if we have MST msg and return since we poll for it */
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+		return false;
+
+	/* For now we only handle 'Downstream port status' case.
+	 * If we got sink count changed it means
+	 * Downstream port status changed,
+	 * then DM should call DC to do the detection. */
+	if (hpd_rx_irq_check_link_loss_status(
+		link,
+		&hpd_irq_dpcd_data)) {
+		/* Connectivity log: link loss */
+		CONN_DATA_LINK_LOSS(link,
+					hpd_irq_dpcd_data.raw,
+					sizeof(hpd_irq_dpcd_data),
+					"Status: ");
+
+		perform_link_training_with_retries(link,
+			&link->cur_link_settings,
+			true, LINK_TRAINING_ATTEMPTS);
+
+		status = false;
+	}
+
+	if (link->type == dc_connection_active_dongle &&
+		hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+			!= link->dpcd_sink_count)
+		status = true;
+
+	/* reasons for HPD RX:
+	 * 1. Link Loss - ie Re-train the Link
+	 * 2. MST sideband message
+	 * 3. Automated Test - ie. Internal Commit
+	 * 4. CP (copy protection) - (not interesting for DM???)
+	 * 5. DRR
+	 * 6. Downstream Port status changed
+	 * -ie. Detect - this the only one
+	 * which is interesting for DM because
+	 * it must call dc_link_detect.
+	 */
+	return status;
+}
+
+/*query dpcd for version and mst cap addresses*/
+/* Query DPCD revision and MSTM_CAP to decide whether the sink is
+ * MST-capable: requires DPCD 1.2+ and MST_CAP set. */
+bool is_mst_supported(struct dc_link *link)
+{
+	union dpcd_rev rev;
+	union mstm_cap cap;
+
+	rev.raw = 0;
+	cap.raw = 0;
+
+	if (core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
+			sizeof(rev)) != DC_OK)
+		return false;
+
+	if (rev.raw < DPCD_REV_12)
+		return false;
+
+	if (core_link_read_dpcd(link, DP_MSTM_CAP, &cap.raw,
+			sizeof(cap)) != DC_OK)
+		return false;
+
+	return cap.bits.MST_CAP == 1;
+}
+
+/* True when the attached dongle is one of the active DP converters. */
+bool is_dp_active_dongle(const struct dc_link *link)
+{
+	switch (link->dpcd_caps.dongle_type) {
+	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Decode the DPCD DOWNSTREAMPORT_PRESENT byte (@data) to classify the
+ * attached converter/dongle, refine that with the detailed
+ * downstream-port caps on DPCD 1.1+ (including HDMI converter
+ * capabilities), and cache the branch device OUI, name and HW revision
+ * in link->dpcd_caps.
+ */
+static void get_active_converter_info(
+	uint8_t data, struct dc_link *link)
+{
+	union dp_downstream_port_present ds_port = { .byte = data };
+
+	/* decode converter info*/
+	if (!ds_port.fields.PORT_PRESENT) {
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		ddc_service_set_dongle_type(link->ddc,
+				link->dpcd_caps.dongle_type);
+		return;
+	}
+
+	switch (ds_port.fields.PORT_TYPE) {
+	case DOWNSTREAM_VGA:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+		break;
+	case DOWNSTREAM_DVI_HDMI:
+		/* At this point we don't know is it DVI or HDMI,
+		 * assume DVI.*/
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+		break;
+	default:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		break;
+	}
+
+	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+		uint8_t det_caps[4];
+		union dwnstream_port_caps_byte0 *port_caps =
+			(union dwnstream_port_caps_byte0 *)det_caps;
+		core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+				det_caps, sizeof(det_caps));
+
+		/* NOTE(review): no default case - detailed types other than
+		 * VGA/DVI/HDMI keep the coarse classification set above. */
+		switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+		case DOWN_STREAM_DETAILED_VGA:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_VGA_CONVERTER;
+			break;
+		case DOWN_STREAM_DETAILED_DVI:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_DVI_CONVERTER;
+			break;
+		case DOWN_STREAM_DETAILED_HDMI:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+			link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+			if (ds_port.fields.DETAILED_CAPS) {
+
+				union dwnstream_port_caps_byte3_hdmi
+					hdmi_caps = {.raw = det_caps[3] };
+				union dwnstream_port_caps_byte1
+					hdmi_color_caps = {.raw = det_caps[2] };
+				/* NOTE(review): assumes det_caps[1] is the max
+				 * converter clock scaled by 25000 - confirm the
+				 * units against the DP spec's detailed cap
+				 * field definition. */
+				link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
+					det_caps[1] * 25000;
+
+				link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+					hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+				link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+					hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+				link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+					hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+				link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+					hdmi_caps.bits.YCrCr422_CONVERSION;
+				link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+					hdmi_caps.bits.YCrCr420_CONVERSION;
+
+				link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
+					hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+
+				link->dpcd_caps.dongle_caps.extendedCapValid = true;
+			}
+
+			break;
+		}
+	}
+
+	ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+	{
+		struct dp_device_vendor_id dp_id;
+
+		/* read IEEE branch device id */
+		core_link_read_dpcd(
+			link,
+			DP_BRANCH_OUI,
+			(uint8_t *)&dp_id,
+			sizeof(dp_id));
+
+		/* pack the 3-byte OUI into a single integer, MSB first */
+		link->dpcd_caps.branch_dev_id =
+			(dp_id.ieee_oui[0] << 16) +
+			(dp_id.ieee_oui[1] << 8) +
+			dp_id.ieee_oui[2];
+
+		memmove(
+			link->dpcd_caps.branch_dev_name,
+			dp_id.ieee_device_id,
+			sizeof(dp_id.ieee_device_id));
+	}
+
+	{
+		struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+		core_link_read_dpcd(
+			link,
+			DP_BRANCH_REVISION_START,
+			(uint8_t *)&dp_hw_fw_revision,
+			sizeof(dp_hw_fw_revision));
+
+		link->dpcd_caps.branch_hw_revision =
+			dp_hw_fw_revision.ieee_hw_rev;
+	}
+}
+
+/*
+ * Workaround for branch device OUI 0010FA-family dongles.
+ *
+ * If no DPCD revision was read yet, power the receiver up and retry the
+ * read a few times (some active dongles power down AUX).  Then, for the
+ * known DP-VGA converter branch device IDs, flag the link so the
+ * encoder skips DP RX power-down on disable_output.
+ *
+ * Fix: the previous version decoded DOWNSTREAMPORT_PRESENT into a local
+ * ds_port that was never read (set-but-unused); removed.
+ */
+static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+		int length)
+{
+	int retry = 0;
+
+	if (!link->dpcd_caps.dpcd_rev.raw) {
+		do {
+			dp_receiver_power_ctrl(link, true);
+			core_link_read_dpcd(link, DP_DPCD_REV,
+							dpcd_data, length);
+			link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+				DP_DPCD_REV -
+				DP_DPCD_REV];
+		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+	}
+
+	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+		switch (link->dpcd_caps.branch_dev_id) {
+		/* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+		 * all internal circuits including AUX communication preventing
+		 * reading DPCD table and EDID (spec violation).
+		 * Encoder will skip DP RX power down on disable_output to
+		 * keep receiver powered all the time.*/
+		case DP_BRANCH_DEVICE_ID_1:
+		case DP_BRANCH_DEVICE_ID_4:
+			link->wa_flags.dp_keep_receiver_powered = true;
+			break;
+
+		/* TODO: May need work around for other dongles. */
+		default:
+			link->wa_flags.dp_keep_receiver_powered = false;
+			break;
+		}
+	} else
+		link->wa_flags.dp_keep_receiver_powered = false;
+}
+
+/*
+ * Read the receiver capability block (DPCD 000h..00Eh, or the DP 1.3
+ * extended block when EXT_RECIEVER_CAP_FIELD_PRESENT is set) and cache
+ * it in link->dpcd_caps / link->reported_link_cap.
+ *
+ * Fix: down_strm_port_count was only memset to zero and never populated
+ * from dpcd_data, so allow_invalid_MSA_timing_param was always false;
+ * read DOWN_STREAM_PORT_COUNT before consuming its bit.
+ */
+static void retrieve_link_cap(struct dc_link *link)
+{
+	uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
+
+	union down_stream_port_count down_strm_port_count;
+	union edp_configuration_cap edp_config_cap;
+	union dp_downstream_port_present ds_port = { 0 };
+
+	memset(dpcd_data, '\0', sizeof(dpcd_data));
+	memset(&down_strm_port_count,
+		'\0', sizeof(union down_stream_port_count));
+	memset(&edp_config_cap, '\0',
+		sizeof(union edp_configuration_cap));
+
+	core_link_read_dpcd(
+		link,
+		DP_DPCD_REV,
+		dpcd_data,
+		sizeof(dpcd_data));
+
+	{
+		union training_aux_rd_interval aux_rd_interval;
+
+		aux_rd_interval.raw =
+			dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
+
+		if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+			core_link_read_dpcd(
+				link,
+				DP_DP13_DPCD_REV,
+				dpcd_data,
+				sizeof(dpcd_data));
+		}
+	}
+
+	link->dpcd_caps.dpcd_rev.raw =
+		dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+	ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+				 DP_DPCD_REV];
+
+	get_active_converter_info(ds_port.byte, link);
+
+	dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+	/* populate the downstream port count before its IGNORE_MSA bit is
+	 * consumed below */
+	down_strm_port_count.raw = dpcd_data[
+		DP_DOWN_STREAM_PORT_COUNT - DP_DPCD_REV];
+
+	link->dpcd_caps.allow_invalid_MSA_timing_param =
+		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+	link->dpcd_caps.max_ln_count.raw = dpcd_data[
+		DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+	link->dpcd_caps.max_down_spread.raw = dpcd_data[
+		DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+	link->reported_link_cap.lane_count =
+		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+	link->reported_link_cap.link_rate = dpcd_data[
+		DP_MAX_LINK_RATE - DP_DPCD_REV];
+	link->reported_link_cap.link_spread =
+		link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+	edp_config_cap.raw = dpcd_data[
+		DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+	link->dpcd_caps.panel_mode_edp =
+		edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+	link->dpcd_caps.dpcd_display_control_capable =
+		edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+	link->test_pattern_enabled = false;
+	link->compliance_test_state.raw = 0;
+
+	/* read sink count */
+	core_link_read_dpcd(link,
+			DP_SINK_COUNT,
+			&link->dpcd_caps.sink_count.raw,
+			sizeof(link->dpcd_caps.sink_count.raw));
+
+	/* Connectivity log: detection */
+	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+}
+
+void detect_dp_sink_caps(struct dc_link *link)
+{
+	retrieve_link_cap(link);
+
+	/* dc init_hw has already powered the encoder with the connector's
+	 * default signal, so native DP needs no encoder power-up here;
+	 * for non-native DP, hw_init may need to check the signal or
+	 * power up the encoder.
+	 */
+	/* TODO save sink caps in link->sink */
+}
+
+void detect_edp_sink_caps(struct dc_link *link)
+{
+	retrieve_link_cap(link);
+	/* take the reported caps directly as the verified caps */
+	link->verified_link_cap = link->reported_link_cap;
+}
+
+void dc_link_dp_enable_hpd(const struct dc_link *link)
+{
+	struct link_encoder *enc = link->link_enc;
+
+	/* the enable_hpd hook is optional */
+	if (enc && enc->funcs->enable_hpd)
+		enc->funcs->enable_hpd(enc);
+}
+
+void dc_link_dp_disable_hpd(const struct dc_link *link)
+{
+	struct link_encoder *encoder = link->link_enc;
+
+	/* Guard the hook that is actually invoked: the original checked
+	 * funcs->enable_hpd (copy-paste from the enable path), which
+	 * could dereference a NULL disable_hpd. */
+	if (encoder != NULL && encoder->funcs->disable_hpd != NULL)
+		encoder->funcs->disable_hpd(encoder);
+}
+
+/* PHY patterns are the [PHY_PATTERN_BEGIN, PHY_PATTERN_END] range plus
+ * VIDEO_MODE (which exits a PHY pattern). */
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+	return (test_pattern >= DP_TEST_PATTERN_PHY_PATTERN_BEGIN &&
+			test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
+			test_pattern == DP_TEST_PATTERN_VIDEO_MODE;
+}
+
+/*
+ * Program a CRTC (timing generator) test pattern for the given pipe:
+ * map the DP test pattern onto the controller pattern, disable
+ * bit-depth reduction while a pattern is shown, and restore it when
+ * returning to VIDEO_MODE.
+ */
+static void set_crtc_test_pattern(struct dc_link *link,
+				struct pipe_ctx *pipe_ctx,
+				enum dp_test_pattern test_pattern)
+{
+	enum controller_dp_test_pattern controller_test_pattern;
+	enum dc_color_depth color_depth = pipe_ctx->
+		stream->timing.display_color_depth;
+	struct bit_depth_reduction_params params;
+
+	memset(&params, 0, sizeof(params));
+
+	/* translate the DP pattern to the controller equivalent */
+	switch (test_pattern) {
+	case DP_TEST_PATTERN_COLOR_SQUARES:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+	break;
+	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+	break;
+	case DP_TEST_PATTERN_VERTICAL_BARS:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+	break;
+	case DP_TEST_PATTERN_HORIZONTAL_BARS:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+	break;
+	case DP_TEST_PATTERN_COLOR_RAMP:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+	break;
+	default:
+		controller_test_pattern =
+				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+	break;
+	}
+
+	switch (test_pattern) {
+	case DP_TEST_PATTERN_COLOR_SQUARES:
+	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+	case DP_TEST_PATTERN_VERTICAL_BARS:
+	case DP_TEST_PATTERN_HORIZONTAL_BARS:
+	case DP_TEST_PATTERN_COLOR_RAMP:
+	{
+		/* disable bit depth reduction (params is zeroed above) */
+		pipe_ctx->stream->bit_depth_params = params;
+		pipe_ctx->stream_res.opp->funcs->
+			opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+		pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+				controller_test_pattern, color_depth);
+	}
+	break;
+	case DP_TEST_PATTERN_VIDEO_MODE:
+	{
+		/* restore bitdepth reduction */
+		resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+					&params);
+		pipe_ctx->stream->bit_depth_params = params;
+		pipe_ctx->stream_res.opp->funcs->
+			opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+		pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+				color_depth);
+	}
+	break;
+
+	/* non-CRTC patterns: nothing to program here */
+	default:
+	break;
+	}
+}
+
+/*
+ * Program a DP test pattern on the pipe driven by @link.
+ *
+ * VIDEO_MODE while a pattern is active: reset the CRTC and PHY
+ * patterns and unblank the stream.  PHY patterns: apply HW/DPCD lane
+ * settings, blank the stream, drive the pattern, and tell the sink
+ * which qualification pattern is active (LINK_QUAL_LANEx_SET on DPCD
+ * 1.2+, TRAINING_PATTERN_SET otherwise).  Everything else is treated
+ * as a CRTC pattern.
+ *
+ * Returns false when a PHY pattern request cannot be translated (or is
+ * VIDEO_MODE outside the reset path above); true otherwise.
+ */
+bool dc_link_dp_set_test_pattern(
+	struct dc_link *link,
+	enum dp_test_pattern test_pattern,
+	const struct link_training_settings *p_link_settings,
+	const unsigned char *p_custom_pattern,
+	unsigned int cust_pattern_size)
+{
+	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+	struct pipe_ctx *pipe_ctx = &pipes[0];
+	unsigned int lane;
+	unsigned int i;
+	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+	union dpcd_training_pattern training_pattern;
+	enum dpcd_phy_test_patterns pattern;
+
+	memset(&training_pattern, 0, sizeof(training_pattern));
+
+	/* find the pipe whose stream sinks into this link
+	 * (falls back to pipe 0 when none matches) */
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (pipes[i].stream->sink->link == link) {
+			pipe_ctx = &pipes[i];
+			break;
+		}
+	}
+
+	/* Reset CRTC Test Pattern if it is currently running and request
+	 * is VideoMode Reset DP Phy Test Pattern if it is currently running
+	 * and request is VideoMode
+	 */
+	if (link->test_pattern_enabled && test_pattern ==
+			DP_TEST_PATTERN_VIDEO_MODE) {
+		/* Set CRTC Test Pattern */
+		set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+		dp_set_hw_test_pattern(link, test_pattern,
+				(uint8_t *)p_custom_pattern,
+				(uint32_t)cust_pattern_size);
+
+		/* Unblank Stream */
+		link->dc->hwss.unblank_stream(
+			pipe_ctx,
+			&link->verified_link_cap);
+		/* TODO:m_pHwss->MuteAudioEndpoint
+		 * (pPathMode->pDisplayPath, false);
+		 */
+
+		/* Reset Test Pattern state */
+		link->test_pattern_enabled = false;
+
+		return true;
+	}
+
+	/* Check for PHY Test Patterns */
+	if (is_dp_phy_pattern(test_pattern)) {
+		/* Set DPCD Lane Settings before running test pattern */
+		if (p_link_settings != NULL) {
+			dp_set_hw_lane_settings(link, p_link_settings);
+			dpcd_set_lane_settings(link, p_link_settings);
+		}
+
+		/* Blank stream if running test pattern */
+		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+			/*TODO:
+			 * m_pHwss->
+			 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+			 */
+			/* Blank stream */
+			pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+		}
+
+		dp_set_hw_test_pattern(link, test_pattern,
+				(uint8_t *)p_custom_pattern,
+				(uint32_t)cust_pattern_size);
+
+		if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+			/* Set Test Pattern state */
+			link->test_pattern_enabled = true;
+			if (p_link_settings != NULL)
+				dpcd_set_link_settings(link,
+						p_link_settings);
+		}
+
+		/* translate into the DPCD PHY test pattern encoding */
+		switch (test_pattern) {
+		case DP_TEST_PATTERN_VIDEO_MODE:
+			pattern = PHY_TEST_PATTERN_NONE;
+			break;
+		case DP_TEST_PATTERN_D102:
+			pattern = PHY_TEST_PATTERN_D10_2;
+			break;
+		case DP_TEST_PATTERN_SYMBOL_ERROR:
+			pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+			break;
+		case DP_TEST_PATTERN_PRBS7:
+			pattern = PHY_TEST_PATTERN_PRBS7;
+			break;
+		case DP_TEST_PATTERN_80BIT_CUSTOM:
+			pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+			break;
+		case DP_TEST_PATTERN_CP2520_1:
+			pattern = PHY_TEST_PATTERN_CP2520_1;
+			break;
+		case DP_TEST_PATTERN_CP2520_2:
+			pattern = PHY_TEST_PATTERN_CP2520_2;
+			break;
+		case DP_TEST_PATTERN_CP2520_3:
+			pattern = PHY_TEST_PATTERN_CP2520_3;
+			break;
+		default:
+			return false;
+		}
+
+		if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+		/*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+			return false;
+
+		if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+			/* tell receiver that we are sending qualification
+			 * pattern DP 1.2 or later - DP receiver's link quality
+			 * pattern is set using DPCD LINK_QUAL_LANEx_SET
+			 * register (0x10B~0x10E)\
+			 */
+			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+				link_qual_pattern[lane] =
+						(unsigned char)(pattern);
+
+			core_link_write_dpcd(link,
+					DP_LINK_QUAL_LANE0_SET,
+					link_qual_pattern,
+					sizeof(link_qual_pattern));
+		} else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+			   link->dpcd_caps.dpcd_rev.raw == 0) {
+			/* tell receiver that we are sending qualification
+			 * pattern DP 1.1a or earlier - DP receiver's link
+			 * quality pattern is set using
+			 * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+			 * register (0x102). We will use v_1.3 when we are
+			 * setting test pattern for DP 1.1.
+			 */
+			core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
+					    &training_pattern.raw,
+					    sizeof(training_pattern));
+			training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+			core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
+					     &training_pattern.raw,
+					     sizeof(training_pattern));
+		}
+	} else {
+	/* CRTC Patterns */
+		set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+		/* Set Test Pattern state */
+		link->test_pattern_enabled = true;
+	}
+
+	return true;
+}
+
+/*
+ * Set or clear MST_EN in DPCD MSTM_CTRL via read-modify-write.
+ */
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
+{
+	/* initialize so a failed DPCD read does not feed an
+	 * uninitialized byte into the read-modify-write below */
+	unsigned char mstm_cntl = 0;
+
+	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
+	if (enable)
+		mstm_cntl |= DP_MST_EN;
+	else
+		mstm_cntl &= (~DP_MST_EN);
+
+	core_link_write_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
+}

+ 331 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c

@@ -0,0 +1,331 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+
+
+#include "dm_services.h"
+#include "dc.h"
+#include "inc/core_types.h"
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+#include "link_hwss.h"
+#include "hw_sequencer.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dpcd_defs.h"
+
+/* Read @size bytes from the sink's DPCD starting at @address.
+ * Thin wrapper that maps the DM helper's boolean result onto dc_status.
+ */
+enum dc_status core_link_read_dpcd(
+	struct dc_link *link,
+	uint32_t address,
+	uint8_t *data,
+	uint32_t size)
+{
+	bool ok = dm_helpers_dp_read_dpcd(
+			link->ctx, link, address, data, size);
+
+	return ok ? DC_OK : DC_ERROR_UNEXPECTED;
+}
+
+/* Write @size bytes to the sink's DPCD starting at @address.
+ * Thin wrapper that maps the DM helper's boolean result onto dc_status.
+ */
+enum dc_status core_link_write_dpcd(
+	struct dc_link *link,
+	uint32_t address,
+	const uint8_t *data,
+	uint32_t size)
+{
+	bool ok = dm_helpers_dp_write_dpcd(
+			link->ctx, link, address, data, size);
+
+	return ok ? DC_OK : DC_ERROR_UNEXPECTED;
+}
+
+/* Set the DP receiver power state via DPCD SET_POWER: D0 when @on, D3 off. */
+void dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+	uint8_t state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+	core_link_write_dpcd(link, DP_SET_POWER, &state, sizeof(state));
+}
+
+/* Enable the transmitter PHY for a DP/eDP/DP-MST link.
+ *
+ * Re-homes any pipe driving this link onto the DP DTO clock source if
+ * needed, enables the link encoder output for the given signal type, and
+ * finally powers up the DP receiver (DPCD SET_POWER -> D0).
+ *
+ * NOTE(review): for eDP the ordering is deliberate — panel power on,
+ * then output enable, then backlight on.
+ */
+void dp_enable_link_phy(
+	struct dc_link *link,
+	enum signal_type signal,
+	enum clock_source_id clock_source,
+	const struct dc_link_settings *link_settings)
+{
+	struct link_encoder *link_enc = link->link_enc;
+
+	struct pipe_ctx *pipes =
+			link->dc->current_state->res_ctx.pipe_ctx;
+	struct clock_source *dp_cs =
+			link->dc->res_pool->dp_clock_source;
+	unsigned int i;
+	/* If the current pixel clock source is not DTO(happens after
+	 * switching from HDMI passive dongle to DP on the same connector),
+	 * switch the pixel clock source to DTO.
+	 */
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (pipes[i].stream != NULL &&
+			pipes[i].stream->sink != NULL &&
+			pipes[i].stream->sink->link == link) {
+			if (pipes[i].clock_source != NULL &&
+					pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+				pipes[i].clock_source = dp_cs;
+				pipes[i].stream_res.pix_clk_params.requested_pix_clk =
+						pipes[i].stream->timing.pix_clk_khz;
+				pipes[i].clock_source->funcs->program_pix_clk(
+							pipes[i].clock_source,
+							&pipes[i].stream_res.pix_clk_params,
+							&pipes[i].pll_settings);
+			}
+		}
+	}
+
+	if (dc_is_dp_sst_signal(signal)) {
+		if (signal == SIGNAL_TYPE_EDP) {
+			/* eDP: panel power must be up before the output is
+			 * enabled; backlight comes on last.
+			 */
+			link->dc->hwss.edp_power_control(link->link_enc, true);
+			link_enc->funcs->enable_dp_output(
+						link_enc,
+						link_settings,
+						clock_source);
+			link->dc->hwss.edp_backlight_control(link, true);
+		} else
+			link_enc->funcs->enable_dp_output(
+						link_enc,
+						link_settings,
+						clock_source);
+	} else {
+		/* DP MST uses a dedicated encoder entry point. */
+		link_enc->funcs->enable_dp_mst_output(
+						link_enc,
+						link_settings,
+						clock_source);
+	}
+
+	/* Wake the receiver (DPCD SET_POWER -> D0). */
+	dp_receiver_power_ctrl(link, true);
+}
+
+/* Wait for an eDP v1.2+ sink to report ready after backlight-off by
+ * polling DPCD SINK_STATUS for up to 50 x 100us (max T9).
+ *
+ * Returns true when the sink is ready (or is pre-eDP-1.2, where
+ * SINK_STATUS polling does not apply); false if a DPCD access failed
+ * while polling. The sole caller in this file ignores the result.
+ *
+ * Fixes vs. original: removed the dead "result = DC_OK" store whose
+ * value was immediately overwritten, and return a real boolean instead
+ * of implicitly converting the enum dc_status value.
+ */
+static bool edp_receiver_ready_T9(struct dc_link *link)
+{
+	unsigned int tries = 0;
+	unsigned char sinkstatus = 0;
+	unsigned char edpRev = 0;
+	enum dc_status result = DC_OK;
+
+	/* If this read fails, edpRev stays 0 and we take the pre-1.2 path. */
+	core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+	if (edpRev < DP_EDP_12)
+		return true;
+
+	/* Starting from eDP v1.2, SINK_STATUS indicates the sink is ready. */
+	do {
+		sinkstatus = 1;
+		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+		if (sinkstatus == 0)
+			break;
+		if (result != DC_OK)
+			break;
+		udelay(100); /* max T9 */
+	} while (++tries < 50);
+
+	return result == DC_OK;
+}
+
+/* Disable the transmitter PHY for a DP/eDP link.
+ *
+ * Powers the receiver down (unless a workaround flag requires it to stay
+ * powered), tears down the encoder output, and clears the cached link
+ * settings. For eDP the ordering is deliberate: backlight off, wait for
+ * the sink (T9), output off, panel power off.
+ */
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
+{
+	/* Some sinks must stay powered (workaround flag). */
+	if (!link->wa_flags.dp_keep_receiver_powered)
+		dp_receiver_power_ctrl(link, false);
+
+	if (signal == SIGNAL_TYPE_EDP) {
+		link->dc->hwss.edp_backlight_control(link, false);
+		edp_receiver_ready_T9(link);
+		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+		link->dc->hwss.edp_power_control(link->link_enc, false);
+	} else
+		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+
+	/* Clear current link setting.*/
+	memset(&link->cur_link_settings, 0,
+			sizeof(link->cur_link_settings));
+}
+
+/* Tear down an MST link, but only once no stream is still using it;
+ * afterwards the sink is switched back into SST mode.
+ */
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
+{
+	if (link->mst_stream_alloc_table.stream_count == 0) {
+		dp_disable_link_phy(link, signal);
+
+		/* Link is down; return the sink to SST mode. */
+		dp_enable_mst_on_sink(link, false);
+	}
+}
+
+/* Program the PHY with the test pattern corresponding to a HW link
+ * training pattern; unknown patterns map to DP_TEST_PATTERN_UNSUPPORTED.
+ * Always returns true.
+ */
+bool dp_set_hw_training_pattern(
+	struct dc_link *link,
+	enum hw_dp_training_pattern pattern)
+{
+	enum dp_test_pattern test_pattern;
+
+	switch (pattern) {
+	case HW_DP_TRAINING_PATTERN_1:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+		break;
+	case HW_DP_TRAINING_PATTERN_2:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+		break;
+	case HW_DP_TRAINING_PATTERN_3:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+		break;
+	case HW_DP_TRAINING_PATTERN_4:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+		break;
+	default:
+		test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+		break;
+	}
+
+	dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+
+	return true;
+}
+
+/* Program per-lane drive settings (swing/pre-emphasis) on the encoder. */
+void dp_set_hw_lane_settings(
+	struct dc_link *link,
+	const struct link_training_settings *link_settings)
+{
+	link->link_enc->funcs->dp_set_lane_settings(link->link_enc,
+						    link_settings);
+}
+
+/* Pick the DP panel mode for @link.
+ *
+ * Known DP-to-VGA/LVDS converters (matched on branch device id + name)
+ * need DP_PANEL_MODE_SPECIAL; eDP-capable sinks get DP_PANEL_MODE_EDP;
+ * everything else uses the default mode.
+ */
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+	const char *converter_id = NULL;
+
+	/* We need to explicitly check that the connector is not DP:
+	 * some Travis VGA dongles get reported by the video BIOS as DP.
+	 */
+	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+		switch (link->dpcd_caps.branch_dev_id) {
+		case DP_BRANCH_DEVICE_ID_2:
+			converter_id = DP_VGA_LVDS_CONVERTER_ID_2;
+			break;
+		case DP_BRANCH_DEVICE_ID_3:
+			converter_id = DP_VGA_LVDS_CONVERTER_ID_3;
+			break;
+		default:
+			break;
+		}
+
+		if (converter_id != NULL &&
+		    strncmp(link->dpcd_caps.branch_dev_name,
+			    converter_id,
+			    sizeof(link->dpcd_caps.branch_dev_name)) == 0)
+			return DP_PANEL_MODE_SPECIAL;
+	}
+
+	if (link->dpcd_caps.panel_mode_edp)
+		return DP_PANEL_MODE_EDP;
+
+	return DP_PANEL_MODE_DEFAULT;
+}
+
+/* Program a PHY test pattern (optionally with custom pattern bytes) on
+ * the link encoder, together with the current panel mode.
+ */
+void dp_set_hw_test_pattern(
+	struct dc_link *link,
+	enum dp_test_pattern test_pattern,
+	uint8_t *custom_pattern,
+	uint32_t custom_pattern_size)
+{
+	struct encoder_set_dp_phy_pattern_param pattern_param = {
+		.dp_phy_pattern = test_pattern,
+		.custom_pattern = custom_pattern,
+		.custom_pattern_size = custom_pattern_size,
+		.dp_panel_mode = dp_get_panel_mode(link),
+	};
+
+	link->link_enc->funcs->dp_set_phy_pattern(link->link_enc,
+						  &pattern_param);
+}
+
+/* Retrain a DP link with new settings for DP compliance testing.
+ *
+ * For every pipe driving @link: blank the stream, clear any active test
+ * pattern, power down the receiver, disable the stream and encoder
+ * output, then re-enable the output with @link_setting, retrain (with
+ * retries), and bring the stream (and audio, if present) back up.
+ * The sequencing of these hardware calls is strict — do not reorder.
+ */
+void dp_retrain_link_dp_test(struct dc_link *link,
+			struct dc_link_settings *link_setting,
+			bool skip_video_pattern)
+{
+	struct pipe_ctx *pipes =
+			&link->dc->current_state->res_ctx.pipe_ctx[0];
+	unsigned int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (pipes[i].stream != NULL &&
+			pipes[i].stream->sink != NULL &&
+			pipes[i].stream->sink->link != NULL &&
+			pipes[i].stream_res.stream_enc != NULL &&
+			pipes[i].stream->sink->link == link) {
+			udelay(100);
+
+			pipes[i].stream_res.stream_enc->funcs->dp_blank(
+					pipes[i].stream_res.stream_enc);
+
+			/* disable any test pattern that might be active */
+			dp_set_hw_test_pattern(link,
+					DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+			dp_receiver_power_ctrl(link, false);
+
+			link->dc->hwss.disable_stream(&pipes[i], KEEP_ACQUIRED_RESOURCE);
+
+			link->link_enc->funcs->disable_output(
+					link->link_enc,
+					SIGNAL_TYPE_DISPLAY_PORT,
+					link);
+
+			/* Clear current link setting. */
+			memset(&link->cur_link_settings, 0,
+				sizeof(link->cur_link_settings));
+
+			link->link_enc->funcs->enable_dp_output(
+						link->link_enc,
+						link_setting,
+						pipes[i].clock_source->id);
+
+			dp_receiver_power_ctrl(link, true);
+
+			perform_link_training_with_retries(
+					link,
+					link_setting,
+					skip_video_pattern,
+					LINK_TRAINING_ATTEMPTS);
+
+			/* Cache the settings the link was trained with. */
+			link->cur_link_settings = *link_setting;
+
+			link->dc->hwss.enable_stream(&pipes[i]);
+
+			link->dc->hwss.unblank_stream(&pipes[i],
+					link_setting);
+
+			if (pipes[i].stream_res.audio) {
+				/* notify audio driver for
+				 * audio modes of monitor */
+				pipes[i].stream_res.audio->funcs->az_enable(
+						pipes[i].stream_res.audio);
+
+				/* un-mute audio */
+				/* TODO: audio should be per stream rather than
+				 * per link */
+				pipes[i].stream_res.stream_enc->funcs->
+				audio_mute_control(
+					pipes[i].stream_res.stream_enc, false);
+			}
+		}
+	}
+}

+ 2795 - 0
drivers/gpu/drm/amd/display/dc/core/dc_resource.c

@@ -0,0 +1,2795 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "opp.h"
+#include "timing_generator.h"
+#include "transform.h"
+#include "dpp.h"
+#include "core_types.h"
+#include "set_mode_types.h"
+#include "virtual/virtual_stream_encoder.h"
+
+#include "dce80/dce80_resource.h"
+#include "dce100/dce100_resource.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_resource.h"
+#endif
+#include "dce120/dce120_resource.h"
+
+/* Map a chip family / internal revision onto the display IP version
+ * (DCE/DCN) that drives it. Unrecognized ASICs yield DCE_VERSION_UNKNOWN.
+ */
+enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+{
+	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+	switch (asic_id.chip_family) {
+	case FAMILY_CI:
+		dc_version = DCE_VERSION_8_0;
+		break;
+	case FAMILY_KV:
+		/* Kalindi/Bhavani/Godavari carry DCE 8.3; other KV are 8.1. */
+		if (ASIC_REV_IS_KALINDI(asic_id.hw_internal_rev) ||
+		    ASIC_REV_IS_BHAVANI(asic_id.hw_internal_rev) ||
+		    ASIC_REV_IS_GODAVARI(asic_id.hw_internal_rev))
+			dc_version = DCE_VERSION_8_3;
+		else
+			dc_version = DCE_VERSION_8_1;
+		break;
+	case FAMILY_CZ:
+		dc_version = DCE_VERSION_11_0;
+		break;
+	case FAMILY_VI:
+		/* Tonga/Fiji are DCE 10; Polaris is DCE 11.2; other VI
+		 * revisions stay unknown.
+		 */
+		if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
+		    ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev))
+			dc_version = DCE_VERSION_10_0;
+		else if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
+			 ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+			 ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+			dc_version = DCE_VERSION_11_2;
+		break;
+	case FAMILY_AI:
+		dc_version = DCE_VERSION_12_0;
+		break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	case FAMILY_RV:
+		dc_version = DCN_VERSION_1_0;
+		break;
+#endif
+	default:
+		break;
+	}
+
+	return dc_version;
+}
+
+/* Create the resource pool matching the detected display IP version and
+ * seed it with the crystal reference clock read from the VBIOS.
+ * Returns NULL for unknown versions or if pool creation fails.
+ */
+struct resource_pool *dc_create_resource_pool(
+				struct dc  *dc,
+				int num_virtual_links,
+				enum dce_version dc_version,
+				struct hw_asic_id asic_id)
+{
+	struct resource_pool *res_pool = NULL;
+
+	switch (dc_version) {
+	case DCE_VERSION_8_0:
+		res_pool = dce80_create_resource_pool(num_virtual_links, dc);
+		break;
+	case DCE_VERSION_8_1:
+		res_pool = dce81_create_resource_pool(num_virtual_links, dc);
+		break;
+	case DCE_VERSION_8_3:
+		res_pool = dce83_create_resource_pool(num_virtual_links, dc);
+		break;
+	case DCE_VERSION_10_0:
+		res_pool = dce100_create_resource_pool(num_virtual_links, dc);
+		break;
+	case DCE_VERSION_11_0:
+		/* DCE 11.0 additionally needs the ASIC id. */
+		res_pool = dce110_create_resource_pool(num_virtual_links, dc,
+						       asic_id);
+		break;
+	case DCE_VERSION_11_2:
+		res_pool = dce112_create_resource_pool(num_virtual_links, dc);
+		break;
+	case DCE_VERSION_12_0:
+		res_pool = dce120_create_resource_pool(num_virtual_links, dc);
+		break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	case DCN_VERSION_1_0:
+		res_pool = dcn10_create_resource_pool(num_virtual_links, dc);
+		break;
+#endif
+	default:
+		break;
+	}
+
+	if (res_pool != NULL) {
+		struct dc_firmware_info fw_info = { { 0 } };
+
+		if (dc->ctx->dc_bios->funcs->get_firmware_info(
+				dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK)
+			res_pool->ref_clock_inKhz =
+					fw_info.pll_info.crystal_frequency;
+		else
+			ASSERT_CRITICAL(false);
+	}
+
+	return res_pool;
+}
+
+/* Destroy the resource pool and hardware sequencer owned by @dc.
+ * Safe to call with a NULL @dc or an already-destroyed pool.
+ */
+void dc_destroy_resource_pool(struct dc  *dc)
+{
+	if (dc == NULL)
+		return;
+
+	if (dc->res_pool)
+		dc->res_pool->funcs->destroy(&dc->res_pool);
+
+	kfree(dc->hwseq);
+}
+
+/* Derive audio support flags and the usable audio-stream count from the
+ * DCE hardware straps. DP audio is always available; HDMI audio (native
+ * and on-dongle) requires HDMI not disabled plus the audio pinstrap bit.
+ */
+static void update_num_audio(
+	const struct resource_straps *straps,
+	unsigned int *num_audio,
+	struct audio_support *aud_support)
+{
+	bool hdmi_audio = straps->hdmi_disable == 0 &&
+			(straps->dc_pinstraps_audio & 0x2) != 0;
+
+	aud_support->dp_audio = true;
+	aud_support->hdmi_audio_native = hdmi_audio;
+	aud_support->hdmi_audio_on_dongle = hdmi_audio;
+
+	switch (straps->audio_stream_number) {
+	case 0:
+		/* multiple audio streams supported */
+		break;
+	case 1:
+		/* single audio stream only */
+		*num_audio = 1;
+		break;
+	default:
+		DC_ERR("DC: unexpected audio fuse!\n");
+	}
+}
+
+/* Populate @pool using the per-ASIC @create_funcs: audio endpoints,
+ * stream encoders (hardware + one virtual per virtual link), and the
+ * hardware sequencer. Returns false on a hard creation failure.
+ */
+bool resource_construct(
+	unsigned int num_virtual_links,
+	struct dc  *dc,
+	struct resource_pool *pool,
+	const struct resource_create_funcs *create_funcs)
+{
+	struct dc_context *ctx = dc->ctx;
+	const struct resource_caps *caps = pool->res_cap;
+	int i;
+	unsigned int num_audio = caps->num_audio;
+	struct resource_straps straps = {0};
+
+	if (create_funcs->read_dce_straps)
+		create_funcs->read_dce_straps(dc->ctx, &straps);
+
+	pool->audio_count = 0;
+	if (create_funcs->create_audio) {
+		/* find the total number of streams available via the
+		 * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+		 * registers (one for each pin) starting from pin 1
+		 * up to the max number of audio pins.
+		 * We stop on the first pin where
+		 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+		 */
+		update_num_audio(&straps, &num_audio, &pool->audio_support);
+		for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+			struct audio *aud = create_funcs->create_audio(ctx, i);
+
+			if (aud == NULL) {
+				DC_ERR("DC: failed to create audio!\n");
+				return false;
+			}
+
+			/* First pin without connectivity ends enumeration. */
+			if (!aud->funcs->endpoint_valid(aud)) {
+				aud->funcs->destroy(&aud);
+				break;
+			}
+
+			pool->audios[i] = aud;
+			pool->audio_count++;
+		}
+	}
+
+	pool->stream_enc_count = 0;
+	if (create_funcs->create_stream_encoder) {
+		for (i = 0; i < caps->num_stream_encoder; i++) {
+			pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
+			/* NOTE(review): on failure this only logs — the count
+			 * is still incremented and a NULL entry remains in
+			 * stream_enc[]; confirm callers tolerate NULL slots.
+			 */
+			if (pool->stream_enc[i] == NULL)
+				DC_ERR("DC: failed to create stream_encoder!\n");
+			pool->stream_enc_count++;
+		}
+	}
+	/* Fewer audio endpoints than encoders means audio must be
+	 * assigned dynamically at stream-enable time.
+	 */
+	dc->caps.dynamic_audio = false;
+	if (pool->audio_count < pool->stream_enc_count) {
+		dc->caps.dynamic_audio = true;
+	}
+	/* Append one virtual stream encoder per virtual link. */
+	for (i = 0; i < num_virtual_links; i++) {
+		pool->stream_enc[pool->stream_enc_count] =
+			virtual_stream_encoder_create(
+					ctx, ctx->dc_bios);
+		if (pool->stream_enc[pool->stream_enc_count] == NULL) {
+			DC_ERR("DC: failed to create stream_encoder!\n");
+			return false;
+		}
+		pool->stream_enc_count++;
+	}
+
+	/* NOTE(review): create_hwseq result is not checked here — a NULL
+	 * dc->hwseq would only surface later; verify this is intended.
+	 */
+	dc->hwseq = create_funcs->create_hwseq(ctx);
+
+	return true;
+}
+
+
+/* Drop one reference on @clock_source in the resource context.
+ * The pool's dedicated DP clock source is ref-counted separately.
+ */
+void resource_unreference_clock_source(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct clock_source *clock_source)
+{
+	int i;
+
+	for (i = 0; i < pool->clk_src_count; i++) {
+		if (pool->clock_sources[i] == clock_source) {
+			res_ctx->clock_source_ref_count[i]--;
+			break;
+		}
+	}
+
+	if (pool->dp_clock_source == clock_source)
+		res_ctx->dp_clock_source_ref_count--;
+}
+
+/* Take one reference on @clock_source in the resource context.
+ * The pool's dedicated DP clock source is ref-counted separately.
+ */
+void resource_reference_clock_source(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct clock_source *clock_source)
+{
+	int i;
+
+	for (i = 0; i < pool->clk_src_count; i++) {
+		if (pool->clock_sources[i] == clock_source) {
+			res_ctx->clock_source_ref_count[i]++;
+			break;
+		}
+	}
+
+	if (pool->dp_clock_source == clock_source)
+		res_ctx->dp_clock_source_ref_count++;
+}
+
+/* Two streams can be timing-synchronized when their CRTC timings match
+ * exactly; PHY pixel clocks may differ only if both streams are DP.
+ */
+bool resource_are_streams_timing_synchronizable(
+	struct dc_stream_state *stream1,
+	struct dc_stream_state *stream2)
+{
+	if (stream1->timing.h_total != stream2->timing.h_total ||
+	    stream1->timing.v_total != stream2->timing.v_total ||
+	    stream1->timing.h_addressable != stream2->timing.h_addressable ||
+	    stream1->timing.v_addressable != stream2->timing.v_addressable ||
+	    stream1->timing.pix_clk_khz != stream2->timing.pix_clk_khz)
+		return false;
+
+	if (stream1->phy_pix_clk != stream2->phy_pix_clk &&
+	    (!dc_is_dp_signal(stream1->signal) ||
+	     !dc_is_dp_signal(stream2->signal)))
+		return false;
+
+	return true;
+}
+
+/* Decide whether @pipe may share the clock source already owned by
+ * @pipe_with_clk_src: the owner must actually have one, must not be a
+ * virtual or DP stream, the HDMI/DVI pairing must be compatible, and the
+ * two streams' timings must be synchronizable.
+ */
+static bool is_sharable_clk_src(
+	const struct pipe_ctx *pipe_with_clk_src,
+	const struct pipe_ctx *pipe)
+{
+	enum signal_type owner_sig;
+
+	if (pipe_with_clk_src->clock_source == NULL)
+		return false;
+
+	owner_sig = pipe_with_clk_src->stream->signal;
+
+	if (owner_sig == SIGNAL_TYPE_VIRTUAL || dc_is_dp_signal(owner_sig))
+		return false;
+
+	/* HDMI and DVI streams cannot share a clock source. */
+	if ((dc_is_hdmi_signal(owner_sig) &&
+	     dc_is_dvi_signal(pipe->stream->signal)) ||
+	    (dc_is_hdmi_signal(pipe->stream->signal) &&
+	     dc_is_dvi_signal(owner_sig)))
+		return false;
+
+	return resource_are_streams_timing_synchronizable(
+			pipe_with_clk_src->stream, pipe->stream);
+}
+
+/* Return the first in-use clock source @pipe_ctx is allowed to share,
+ * or NULL if no pipe offers a compatible one.
+ */
+struct clock_source *resource_find_used_clk_src_for_sharing(
+					struct resource_context *res_ctx,
+					struct pipe_ctx *pipe_ctx)
+{
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *other = &res_ctx->pipe_ctx[i];
+
+		if (is_sharable_clk_src(other, pipe_ctx))
+			return other->clock_source;
+	}
+
+	return NULL;
+}
+
+/* Translate a surface pixel format into the DAL pixel format used by
+ * the scaler pipeline; unhandled formats map to PIXEL_FORMAT_UNKNOWN.
+ */
+static enum pixel_format convert_pixel_format_to_dalsurface(
+		enum surface_pixel_format surface_pixel_format)
+{
+	switch (surface_pixel_format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+		return PIXEL_FORMAT_INDEX8;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		return PIXEL_FORMAT_RGB565;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+		return PIXEL_FORMAT_ARGB8888;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		return PIXEL_FORMAT_ARGB2101010;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+		return PIXEL_FORMAT_ARGB2101010_XRBIAS;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+		return PIXEL_FORMAT_FP16;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		return PIXEL_FORMAT_420BPP8;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+		return PIXEL_FORMAT_420BPP10;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	default:
+		return PIXEL_FORMAT_UNKNOWN;
+	}
+}
+
+/* Exchange width<->height and x<->y in place (used for 90/270 rotation). */
+static void rect_swap_helper(struct rect *rect)
+{
+	uint32_t tmp;
+
+	tmp = rect->width;
+	rect->width = rect->height;
+	rect->height = tmp;
+
+	tmp = rect->x;
+	rect->x = rect->y;
+	rect->y = tmp;
+}
+
+/* Compute the luma and chroma viewports (the region of the surface the
+ * scaler reads) from the intersection of the stream source rect and the
+ * plane clip rect, scaled into surface-source space. Also derives the
+ * chroma sub-sample inits and splits the viewport for hsplit pipes.
+ */
+static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+	struct rect surf_src = plane_state->src_rect;
+	struct rect clip = { 0 };
+	/* Chroma viewport is halved for 4:2:0 formats. */
+	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+	bool pri_split = pipe_ctx->bottom_pipe &&
+			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+	bool sec_split = pipe_ctx->top_pipe &&
+			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+
+	/* 3D stereo formats never use pipe splitting. */
+	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+		stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+		pri_split = false;
+		sec_split = false;
+	}
+
+	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+		rect_swap_helper(&surf_src);
+
+	/* The actual clip is an intersection between stream
+	 * source and surface clip
+	 */
+	clip.x = stream->src.x > plane_state->clip_rect.x ?
+			stream->src.x : plane_state->clip_rect.x;
+
+	clip.width = stream->src.x + stream->src.width <
+			plane_state->clip_rect.x + plane_state->clip_rect.width ?
+			stream->src.x + stream->src.width - clip.x :
+			plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
+
+	clip.y = stream->src.y > plane_state->clip_rect.y ?
+			stream->src.y : plane_state->clip_rect.y;
+
+	clip.height = stream->src.y + stream->src.height <
+			plane_state->clip_rect.y + plane_state->clip_rect.height ?
+			stream->src.y + stream->src.height - clip.y :
+			plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+
+	/* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+	 * num_pixels = clip.num_pix * scl_ratio
+	 */
+	data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
+			surf_src.width / plane_state->dst_rect.width;
+	data->viewport.width = clip.width *
+			surf_src.width / plane_state->dst_rect.width;
+
+	data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
+			surf_src.height / plane_state->dst_rect.height;
+	data->viewport.height = clip.height *
+			surf_src.height / plane_state->dst_rect.height;
+
+	/* Round down, compensate in init */
+	data->viewport_c.x = data->viewport.x / vpc_div;
+	data->viewport_c.y = data->viewport.y / vpc_div;
+	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
+			dal_fixed31_32_half : dal_fixed31_32_zero;
+	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
+			dal_fixed31_32_half : dal_fixed31_32_zero;
+	/* Round up, assume original video size always even dimensions */
+	data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
+	data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+	/* Handle hsplit */
+	if (pri_split || sec_split) {
+		/* HMirror XOR Secondary_pipe XOR Rotation_180 */
+		bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+					(plane_state->rotation == ROTATION_ANGLE_180);
+
+		if (plane_state->rotation == ROTATION_ANGLE_90
+				|| plane_state->rotation == ROTATION_ANGLE_270)
+			/* Secondary_pipe XOR Rotation_270 */
+			right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+		if (right_view) {
+			data->viewport.width /= 2;
+			data->viewport_c.width /= 2;
+			data->viewport.x +=  data->viewport.width;
+			data->viewport_c.x +=  data->viewport_c.width;
+			/* Ceil offset pipe */
+			data->viewport.width += data->viewport.width % 2;
+			data->viewport_c.width += data->viewport_c.width % 2;
+		} else {
+			data->viewport.width /= 2;
+			data->viewport_c.width /= 2;
+		}
+	}
+
+	/* Rotated surfaces read the viewport transposed. */
+	if (plane_state->rotation == ROTATION_ANGLE_90 ||
+			plane_state->rotation == ROTATION_ANGLE_270) {
+		rect_swap_helper(&data->viewport_c);
+		rect_swap_helper(&data->viewport);
+	}
+}
+
+/* Compute the rectangle-out (the destination rect on the stream, clipped
+ * to the stream destination) for the scaler, split it for h/v-split
+ * pipes, and report in @recout_skip how far this pipe's recout is offset
+ * from the unclipped full recout (needed later for scaler inits).
+ */
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct rect surf_src = plane_state->src_rect;
+	struct rect surf_clip = plane_state->clip_rect;
+	int recout_full_x, recout_full_y;
+
+	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+		rect_swap_helper(&surf_src);
+
+	/* Map the surface clip into stream-destination space and clamp
+	 * the result to the stream destination rect (x/width, then y/height).
+	 */
+	pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+	if (stream->src.x < surf_clip.x)
+		pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
+			- stream->src.x) * stream->dst.width
+						/ stream->src.width;
+
+	pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
+			stream->dst.width / stream->src.width;
+	if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
+			stream->dst.x + stream->dst.width)
+		pipe_ctx->plane_res.scl_data.recout.width =
+			stream->dst.x + stream->dst.width
+						- pipe_ctx->plane_res.scl_data.recout.x;
+
+	pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+	if (stream->src.y < surf_clip.y)
+		pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
+			- stream->src.y) * stream->dst.height
+						/ stream->src.height;
+
+	pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
+			stream->dst.height / stream->src.height;
+	if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
+			stream->dst.y + stream->dst.height)
+		pipe_ctx->plane_res.scl_data.recout.height =
+			stream->dst.y + stream->dst.height
+						- pipe_ctx->plane_res.scl_data.recout.y;
+
+	/* Handle h & vsplit */
+	if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
+		pipe_ctx->plane_state) {
+		/* Secondary pipe of a split: takes the second half. */
+		if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+			pipe_ctx->plane_res.scl_data.recout.height /= 2;
+			pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+			/* Floor primary pipe, ceil 2ndary pipe */
+			pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+		} else {
+			pipe_ctx->plane_res.scl_data.recout.width /= 2;
+			pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
+			pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+		}
+	} else if (pipe_ctx->bottom_pipe &&
+			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
+		/* Primary pipe of a split: keeps the first half. */
+		if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+			pipe_ctx->plane_res.scl_data.recout.height /= 2;
+		else
+			pipe_ctx->plane_res.scl_data.recout.width /= 2;
+	}
+
+	/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
+	 * 				* 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+	 * 				ratio)
+	 */
+	recout_full_x = stream->dst.x + (plane_state->dst_rect.x -  stream->src.x)
+					* stream->dst.width / stream->src.width -
+			surf_src.x * plane_state->dst_rect.width / surf_src.width
+					* stream->dst.width / stream->src.width;
+	recout_full_y = stream->dst.y + (plane_state->dst_rect.y -  stream->src.y)
+					* stream->dst.height / stream->src.height -
+			surf_src.y * plane_state->dst_rect.height / surf_src.height
+					* stream->dst.height / stream->src.height;
+
+	recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
+	recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+}
+
+/* Compute the fixed-point horizontal/vertical scaling ratios as the
+ * combination of surface->plane-dst scaling and stream src->dst scaling,
+ * doubled along the packed axis for 3D stereo, with chroma ratios halved
+ * for 4:2:0 formats.
+ */
+static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct rect surf_src = plane_state->src_rect;
+	const int in_w = stream->src.width;
+	const int in_h = stream->src.height;
+	const int out_w = stream->dst.width;
+	const int out_h = stream->dst.height;
+
+	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+		rect_swap_helper(&surf_src);
+
+	/* Surface source -> plane destination ratio. */
+	pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
+					surf_src.width,
+					plane_state->dst_rect.width);
+	pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
+					surf_src.height,
+					plane_state->dst_rect.height);
+
+	/* Stereo packing squeezes one axis by a factor of two. */
+	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+		pipe_ctx->plane_res.scl_data.ratios.horz.value *= 2;
+	else if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+		pipe_ctx->plane_res.scl_data.ratios.vert.value *= 2;
+
+	/* Fold in the stream src -> dst scaling. */
+	pipe_ctx->plane_res.scl_data.ratios.vert.value = div64_s64(
+		pipe_ctx->plane_res.scl_data.ratios.vert.value * in_h, out_h);
+	pipe_ctx->plane_res.scl_data.ratios.horz.value = div64_s64(
+		pipe_ctx->plane_res.scl_data.ratios.horz.value * in_w, out_w);
+
+	pipe_ctx->plane_res.scl_data.ratios.horz_c = pipe_ctx->plane_res.scl_data.ratios.horz;
+	pipe_ctx->plane_res.scl_data.ratios.vert_c = pipe_ctx->plane_res.scl_data.ratios.vert;
+
+	/* 4:2:0 chroma is sampled at half rate in both directions. */
+	if (pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP8
+			|| pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP10) {
+		pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
+		pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
+	}
+}
+
+/*
+ * Compute the scaler initial-phase (init) values and adjust the viewport
+ * so the filter taps stay within the source surface.  Operates on the
+ * pipe's scl_data in place; recout_skip comes from the caller (see
+ * resource_build_scaling_params / calculate_recout).
+ */
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+	struct rect src = pipe_ctx->plane_state->src_rect;
+	/* 4:2:0 formats subsample chroma by 2 in each direction */
+	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+
+
+	/* For 90/270 rotation do the math in rotated coordinates; the
+	 * viewports are swapped back at the end of this function. */
+	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+		rect_swap_helper(&src);
+		rect_swap_helper(&data->viewport_c);
+		rect_swap_helper(&data->viewport);
+	}
+
+	/*
+	 * Init calculated according to formula:
+	 * 	init = (scaling_ratio + number_of_taps + 1) / 2
+	 * 	init_bot = init + scaling_ratio
+	 * 	init_c = init + truncated_vp_c_offset(from calculate viewport)
+	 */
+	data->inits.h = dal_fixed31_32_div_int(
+			dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+
+	/* h_c/v_c accumulate onto the truncated chroma viewport offset
+	 * pre-seeded by calculate_viewport (see formula above) */
+	data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
+			dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+
+	data->inits.v = dal_fixed31_32_div_int(
+			dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+
+	data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
+			dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+
+
+	/* Adjust for viewport end clip-off */
+	if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+		int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+		int int_part = dal_fixed31_32_floor(
+				dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
+
+		int_part = int_part > 0 ? int_part : 0;
+		data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+	}
+	if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+		int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+		int int_part = dal_fixed31_32_floor(
+				dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
+
+		int_part = int_part > 0 ? int_part : 0;
+		data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+	}
+	if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+		int vp_clip = (src.x + src.width) / vpc_div -
+				data->viewport_c.width - data->viewport_c.x;
+		int int_part = dal_fixed31_32_floor(
+				dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
+
+		int_part = int_part > 0 ? int_part : 0;
+		data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+	}
+	if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+		int vp_clip = (src.y + src.height) / vpc_div -
+				data->viewport_c.height - data->viewport_c.y;
+		int int_part = dal_fixed31_32_floor(
+				dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
+
+		int_part = int_part > 0 ? int_part : 0;
+		data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+	}
+
+	/* Adjust for non-0 viewport offset */
+	/* NOTE(review): recout_skip appears to be the offset of this pipe's
+	 * recout from the split-pipe recout — confirm against calculate_recout */
+	if (data->viewport.x) {
+		int int_part;
+
+		data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
+				data->ratios.horz, recout_skip->width));
+		int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
+		if (int_part < data->taps.h_taps) {
+			int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+						(data->taps.h_taps - int_part) : data->viewport.x;
+			data->viewport.x -= int_adj;
+			data->viewport.width += int_adj;
+			int_part += int_adj;
+		} else if (int_part > data->taps.h_taps) {
+			data->viewport.x += int_part - data->taps.h_taps;
+			data->viewport.width -= int_part - data->taps.h_taps;
+			int_part = data->taps.h_taps;
+		}
+		/* Keep only the fractional part of the fixed31_32 value; the
+		 * clamped integer part is re-added just below.  The three
+		 * sections that follow repeat this same pattern. */
+		data->inits.h.value &= 0xffffffff;
+		data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
+	}
+
+	if (data->viewport_c.x) {
+		int int_part;
+
+		data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
+				data->ratios.horz_c, recout_skip->width));
+		int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
+		if (int_part < data->taps.h_taps_c) {
+			int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+					(data->taps.h_taps_c - int_part) : data->viewport_c.x;
+			data->viewport_c.x -= int_adj;
+			data->viewport_c.width += int_adj;
+			int_part += int_adj;
+		} else if (int_part > data->taps.h_taps_c) {
+			data->viewport_c.x += int_part - data->taps.h_taps_c;
+			data->viewport_c.width -= int_part - data->taps.h_taps_c;
+			int_part = data->taps.h_taps_c;
+		}
+		data->inits.h_c.value &= 0xffffffff;
+		data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
+	}
+
+	if (data->viewport.y) {
+		int int_part;
+
+		data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
+				data->ratios.vert, recout_skip->height));
+		int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
+		if (int_part < data->taps.v_taps) {
+			int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+						(data->taps.v_taps - int_part) : data->viewport.y;
+			data->viewport.y -= int_adj;
+			data->viewport.height += int_adj;
+			int_part += int_adj;
+		} else if (int_part > data->taps.v_taps) {
+			data->viewport.y += int_part - data->taps.v_taps;
+			data->viewport.height -= int_part - data->taps.v_taps;
+			int_part = data->taps.v_taps;
+		}
+		data->inits.v.value &= 0xffffffff;
+		data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
+	}
+
+	if (data->viewport_c.y) {
+		int int_part;
+
+		data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
+				data->ratios.vert_c, recout_skip->height));
+		int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
+		if (int_part < data->taps.v_taps_c) {
+			int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+					(data->taps.v_taps_c - int_part) : data->viewport_c.y;
+			data->viewport_c.y -= int_adj;
+			data->viewport_c.height += int_adj;
+			int_part += int_adj;
+		} else if (int_part > data->taps.v_taps_c) {
+			data->viewport_c.y += int_part - data->taps.v_taps_c;
+			data->viewport_c.height -= int_part - data->taps.v_taps_c;
+			int_part = data->taps.v_taps_c;
+		}
+		data->inits.v_c.value &= 0xffffffff;
+		data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
+	}
+
+	/* Interlaced inits based on final vert inits */
+	data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
+	data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
+
+	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+		rect_swap_helper(&data->viewport_c);
+		rect_swap_helper(&data->viewport);
+	}
+}
+
+/*
+ * Build all scaler programming (pixel format, scaling ratios, viewport,
+ * recout, taps, inits) for one pipe.  Ordering matters: ratio calculation
+ * needs the pixel format, tap calculation needs the ratios, and the inits
+ * need viewport, taps, ratios and the recout of the split pipe.
+ *
+ * Returns false when the viewport is below the 16x16 minimum or no valid
+ * tap configuration exists at either line buffer depth.
+ */
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+	struct view recout_skip = { 0 };
+	bool res = false;
+
+	/* Important: scaling ratio calculation requires pixel format,
+	 * lb depth calculation requires recout and taps require scaling ratios.
+	 * Inits require viewport, taps, ratios and recout of split pipe
+	 */
+	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+			pipe_ctx->plane_state->format);
+
+	calculate_scaling_ratios(pipe_ctx);
+
+	calculate_viewport(pipe_ctx);
+
+	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+		return false;
+
+	calculate_recout(pipe_ctx, &recout_skip);
+
+	/**
+	 * Setting line buffer pixel depth to 24bpp yields banding
+	 * on certain displays, such as the Sharp 4k
+	 */
+	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+
+	pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
+	pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+
+	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
+	/* Taps calculations.  NOTE(review): a pipe appears to carry either a
+	 * DCE transform (xfm) or a DCN dpp — whichever is non-NULL sets res. */
+	if (pipe_ctx->plane_res.xfm != NULL)
+		res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+				pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+	if (pipe_ctx->plane_res.dpp != NULL)
+		res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+	if (!res) {
+		/* Try 24 bpp linebuffer */
+		pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
+
+		/* Fix: this retry path used to call through xfm/dpp without
+		 * the NULL checks done above, dereferencing a NULL pointer on
+		 * ASICs that have only one of the two blocks. */
+		if (pipe_ctx->plane_res.xfm != NULL)
+			res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+				pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+		if (pipe_ctx->plane_res.dpp != NULL)
+			res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+	}
+
+	if (res)
+		/* May need to re-check lb size after this in some obscure scenario */
+		calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+
+	dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
+				"%s: Viewport:\nheight:%d width:%d x:%d "
+				"y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+				"y:%d\n",
+				__func__,
+				pipe_ctx->plane_res.scl_data.viewport.height,
+				pipe_ctx->plane_res.scl_data.viewport.width,
+				pipe_ctx->plane_res.scl_data.viewport.x,
+				pipe_ctx->plane_res.scl_data.viewport.y,
+				plane_state->dst_rect.height,
+				plane_state->dst_rect.width,
+				plane_state->dst_rect.x,
+				plane_state->dst_rect.y);
+
+	return res;
+}
+
+
+/*
+ * Rebuild scaling parameters for every active pipe in @context.
+ * Aborts with DC_FAIL_SCALING on the first pipe that cannot be built.
+ */
+enum dc_status resource_build_scaling_params_for_context(
+	const struct dc  *dc,
+	struct dc_state *context)
+{
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		/* Only pipes with both a plane and a stream are active */
+		if (pipe_ctx->plane_state == NULL || pipe_ctx->stream == NULL)
+			continue;
+
+		if (!resource_build_scaling_params(pipe_ctx))
+			return DC_FAIL_SCALING;
+	}
+
+	return DC_OK;
+}
+
+/*
+ * Return a pipe with no stream attached, or NULL if all are busy.
+ * Walks from the highest index down so that secondary (split) pipe
+ * assignment stays consistent across calls.
+ */
+struct pipe_ctx *find_idle_secondary_pipe(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool)
+{
+	int i;
+
+	for (i = pool->pipe_count - 1; i >= 0; i--) {
+		struct pipe_ctx *pipe = &res_ctx->pipe_ctx[i];
+
+		if (pipe->stream != NULL)
+			continue;
+
+		pipe->pipe_idx = i;
+		return pipe;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the head pipe (the pipe owning the stream back end, i.e. the one
+ * with no top_pipe above it) for @stream.  Returns NULL when no pipe in
+ * @res_ctx drives @stream.
+ *
+ * Fix: removed an unreachable `break` that followed the `return`.
+ */
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+		struct resource_context *res_ctx,
+		struct dc_stream_state *stream)
+{
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (res_ctx->pipe_ctx[i].stream == stream &&
+				!res_ctx->pipe_ctx[i].top_pipe)
+			return &res_ctx->pipe_ctx[i];
+	}
+	return NULL;
+}
+
+/*
+ * Walk the bottom_pipe chain from the head pipe of @stream and return
+ * the last pipe (the tail).  Returns NULL when the stream has no head
+ * pipe in @res_ctx.
+ */
+static struct pipe_ctx *resource_get_tail_pipe_for_stream(
+		struct resource_context *res_ctx,
+		struct dc_stream_state *stream)
+{
+	struct pipe_ctx *tail;
+
+	tail = resource_get_head_pipe_for_stream(res_ctx, stream);
+	if (!tail)
+		return NULL;
+
+	while (tail->bottom_pipe)
+		tail = tail->bottom_pipe;
+
+	return tail;
+}
+
+/*
+ * A free_pipe for a stream is defined here as a pipe
+ * that has no surface attached yet
+ */
+static struct pipe_ctx *acquire_free_pipe_for_stream(
+		struct dc_state *context,
+		const struct resource_pool *pool,
+		struct dc_stream_state *stream)
+{
+	int i;
+	struct resource_context *res_ctx = &context->res_ctx;
+
+	struct pipe_ctx *head_pipe = NULL;
+
+	/* Find head pipe, which has the back end set up*/
+
+	head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+	if (!head_pipe) {
+		/* Fix: the old code only ASSERTed here and then dereferenced
+		 * head_pipe anyway — a NULL dereference in release builds
+		 * where ASSERT compiles to a no-op.  Bail out instead. */
+		ASSERT(0);
+		return NULL;
+	}
+
+	if (!head_pipe->plane_state)
+		return head_pipe;
+
+	/* Re-use pipe already acquired for this stream if available*/
+	for (i = pool->pipe_count - 1; i >= 0; i--) {
+		if (res_ctx->pipe_ctx[i].stream == stream &&
+				!res_ctx->pipe_ctx[i].plane_state) {
+			return &res_ctx->pipe_ctx[i];
+		}
+	}
+
+	/*
+	 * At this point we have no re-useable pipe for this stream and we need
+	 * to acquire an idle one to satisfy the request
+	 */
+
+	if (!pool->funcs->acquire_idle_pipe_for_layer)
+		return NULL;
+
+	return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+/*
+ * Steal the first pipe currently acting as a split (secondary) pipe for
+ * another plane: unlink it from its pipe chain, reset it and rebind the
+ * per-index hardware blocks for @stream.  Returns the pipe index, or -1
+ * when no split pipe exists.
+ */
+static int acquire_first_split_pipe(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct dc_stream_state *stream)
+{
+	int i;
+
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+		/* A split pipe shares its plane_state with the pipe above it */
+		if (pipe_ctx->top_pipe &&
+				pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
+			/* Unlink this pipe from the chain before reuse */
+			pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+			if (pipe_ctx->bottom_pipe)
+				pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+			/* Wipe old state, then rebind index-matched resources */
+			memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+			pipe_ctx->stream_res.tg = pool->timing_generators[i];
+			pipe_ctx->plane_res.hubp = pool->hubps[i];
+			pipe_ctx->plane_res.ipp = pool->ipps[i];
+			pipe_ctx->plane_res.dpp = pool->dpps[i];
+			pipe_ctx->stream_res.opp = pool->opps[i];
+			pipe_ctx->pipe_idx = i;
+
+			pipe_ctx->stream = stream;
+			return i;
+		}
+	}
+	return -1;
+}
+#endif
+
+/*
+ * Attach @plane_state to @stream within @context.  Takes a reference on
+ * the plane state.  When the stream's head pipe already carries a plane,
+ * a free pipe is acquired and linked below the current tail pipe,
+ * inheriting the tail's stream resources.  Returns false when the stream
+ * is not in the context, the surface limit is reached, or no pipe is
+ * available.
+ */
+bool dc_add_plane_to_context(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *context)
+{
+	int i;
+	struct resource_pool *pool = dc->res_pool;
+	struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
+	struct dc_stream_status *stream_status = NULL;
+
+	/* Locate the status entry tracking this stream's planes */
+	for (i = 0; i < context->stream_count; i++)
+		if (context->streams[i] == stream) {
+			stream_status = &context->stream_status[i];
+			break;
+		}
+	if (stream_status == NULL) {
+		dm_error("Existing stream not found; failed to attach surface!\n");
+		return false;
+	}
+
+
+	if (stream_status->plane_count == MAX_SURFACE_NUM) {
+		dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+				plane_state, MAX_SURFACE_NUM);
+		return false;
+	}
+
+	head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+	if (!head_pipe) {
+		dm_error("Head pipe not found for stream_state %p !\n", stream);
+		return false;
+	}
+
+	free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	/* On DCN, fall back to stealing a split pipe when no pipe is free */
+	if (!free_pipe) {
+		int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+		if (pipe_idx >= 0)
+			free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+	}
+#endif
+	if (!free_pipe)
+		return false;
+
+	/* retain new surfaces */
+	dc_plane_state_retain(plane_state);
+	free_pipe->plane_state = plane_state;
+
+	/* Secondary pipe: inherit backend resources from the current tail
+	 * and link the new pipe at the bottom of the chain */
+	if (head_pipe != free_pipe) {
+
+		tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+		ASSERT(tail_pipe);
+
+		free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+		free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
+		free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
+		free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
+		free_pipe->clock_source = tail_pipe->clock_source;
+		free_pipe->top_pipe = tail_pipe;
+		tail_pipe->bottom_pipe = free_pipe;
+	}
+
+	/* assign new surfaces*/
+	stream_status->plane_states[stream_status->plane_count] = plane_state;
+
+	stream_status->plane_count++;
+
+	return true;
+}
+
+/*
+ * Detach @plane_state from @stream in @context: unlink every pipe that
+ * carries the plane from its pipe chain, drop the reference taken by
+ * dc_add_plane_to_context() and compact the stream's plane_states array.
+ * Returns false when the stream or plane is not found in the context.
+ */
+bool dc_remove_plane_from_context(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *context)
+{
+	int i;
+	struct dc_stream_status *stream_status = NULL;
+	struct resource_pool *pool = dc->res_pool;
+
+	for (i = 0; i < context->stream_count; i++)
+		if (context->streams[i] == stream) {
+			stream_status = &context->stream_status[i];
+			break;
+		}
+
+	if (stream_status == NULL) {
+		dm_error("Existing stream not found; failed to remove plane.\n");
+		return false;
+	}
+
+	/* release pipe for plane*/
+	for (i = pool->pipe_count - 1; i >= 0; i--) {
+		struct pipe_ctx *pipe_ctx;
+
+		if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
+			pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			if (pipe_ctx->top_pipe)
+				pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+
+			/* Second condition is to avoid setting NULL to top pipe
+			 * of tail pipe making it look like head pipe in subsequent
+			 * deletes
+			 */
+			if (pipe_ctx->bottom_pipe && pipe_ctx->top_pipe)
+				pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+			/*
+			 * For head pipe detach surfaces from pipe for tail
+			 * pipe just zero it out
+			 */
+			if (!pipe_ctx->top_pipe) {
+				pipe_ctx->plane_state = NULL;
+				pipe_ctx->bottom_pipe = NULL;
+			} else  {
+				memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+			}
+		}
+	}
+
+
+	/* Drop the context's reference on the plane */
+	for (i = 0; i < stream_status->plane_count; i++) {
+		if (stream_status->plane_states[i] == plane_state) {
+
+			dc_plane_state_release(stream_status->plane_states[i]);
+			break;
+		}
+	}
+
+	if (i == stream_status->plane_count) {
+		dm_error("Existing plane_state not found; failed to detach it!\n");
+		return false;
+	}
+
+	stream_status->plane_count--;
+
+	/* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+	for (; i < stream_status->plane_count; i++)
+		stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+	stream_status->plane_states[stream_status->plane_count] = NULL;
+
+	return true;
+}
+
+/*
+ * Remove every plane currently attached to @stream in @context.
+ * Returns false when the stream is not in the context or any plane
+ * fails to detach.
+ */
+bool dc_rem_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_state *context)
+{
+	int i;
+	int plane_count;
+	struct dc_plane_state *planes[MAX_SURFACE_NUM] = { 0 };
+	struct dc_stream_status *status = NULL;
+
+	for (i = 0; i < context->stream_count; i++) {
+		if (context->streams[i] == stream) {
+			status = &context->stream_status[i];
+			break;
+		}
+	}
+
+	if (status == NULL) {
+		dm_error("Existing stream %p not found!\n", stream);
+		return false;
+	}
+
+	/* Snapshot the plane list first: dc_remove_plane_from_context()
+	 * compacts stream_status->plane_states while we iterate. */
+	plane_count = status->plane_count;
+	for (i = 0; i < plane_count; i++)
+		planes[i] = status->plane_states[i];
+
+	for (i = 0; i < plane_count; i++) {
+		if (!dc_remove_plane_from_context(dc, stream, planes[i], context))
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Attach every plane listed for @stream in the validation set to
+ * @context.  Fails if the stream is absent from the set or any plane
+ * cannot be attached.
+ */
+static bool add_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		const struct dc_validation_set set[],
+		int set_count,
+		struct dc_state *context)
+{
+	const struct dc_validation_set *entry = NULL;
+	int i;
+
+	/* Locate the validation-set entry describing this stream */
+	for (i = 0; i < set_count; i++) {
+		if (set[i].stream == stream) {
+			entry = &set[i];
+			break;
+		}
+	}
+
+	if (entry == NULL) {
+		dm_error("Stream %p not found in set!\n", stream);
+		return false;
+	}
+
+	for (i = 0; i < entry->plane_count; i++) {
+		if (!dc_add_plane_to_context(dc, stream, entry->plane_states[i], context))
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Convenience wrapper: build a single-entry validation set from the raw
+ * plane list and delegate to add_all_planes_for_stream().
+ */
+bool dc_add_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state * const *plane_states,
+		int plane_count,
+		struct dc_state *context)
+{
+	int i;
+	struct dc_validation_set set = {
+		.stream = stream,
+		.plane_count = plane_count,
+	};
+
+	for (i = 0; i < plane_count; i++)
+		set.plane_states[i] = plane_states[i];
+
+	return add_all_planes_for_stream(dc, stream, &set, 1, context);
+}
+
+
+
+/*
+ * Decide whether @new_stream needs full timing reprogramming relative
+ * to @cur_stream.
+ */
+static bool is_timing_changed(struct dc_stream_state *cur_stream,
+		struct dc_stream_state *new_stream)
+{
+	/* No current stream: treat as a full timing change */
+	if (cur_stream == NULL)
+		return true;
+
+	/* If sink pointer changed, it means this is a hotplug, we should do
+	 * full hw setting.
+	 */
+	if (cur_stream->sink != new_stream->sink)
+		return true;
+
+	/* If output color space is changed, need to reprogram info frames */
+	if (cur_stream->output_color_space != new_stream->output_color_space)
+		return true;
+
+	/* Otherwise compare the raw CRTC timing structures bit for bit */
+	if (memcmp(&cur_stream->timing, &new_stream->timing,
+			sizeof(struct dc_crtc_timing)) != 0)
+		return true;
+
+	return false;
+}
+
+/*
+ * Two streams share the same backend configuration when they are the
+ * same object, or both non-NULL with unchanged timing.
+ */
+static bool are_stream_backends_same(
+	struct dc_stream_state *stream_a, struct dc_stream_state *stream_b)
+{
+	/* Same object (including both NULL) trivially matches */
+	if (stream_a == stream_b)
+		return true;
+
+	/* Exactly one NULL: cannot share a backend */
+	if (stream_a == NULL || stream_b == NULL)
+		return false;
+
+	return !is_timing_changed(stream_a, stream_b);
+}
+
+/*
+ * A stream is "unchanged" when its backend state matches the old one.
+ */
+bool dc_is_stream_unchanged(
+	struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+	return are_stream_backends_same(old_stream, stream);
+}
+
+/*
+ * Scaling is unchanged only when both the source and destination
+ * rectangles of the two streams are bit-identical.
+ */
+bool dc_is_stream_scaling_unchanged(
+	struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+	if (old_stream == stream)
+		return true;
+
+	if (old_stream == NULL || stream == NULL)
+		return false;
+
+	return memcmp(&old_stream->src, &stream->src, sizeof(struct rect)) == 0 &&
+	       memcmp(&old_stream->dst, &stream->dst, sizeof(struct rect)) == 0;
+}
+
+/* Maximum TMDS single link pixel clock 165MHz */
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
+
+/*
+ * Mark the pool slot holding @stream_enc as acquired or released in the
+ * resource context's bookkeeping.
+ */
+static void update_stream_engine_usage(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct stream_encoder *stream_enc,
+		bool acquired)
+{
+	int i;
+
+	for (i = 0; i < pool->stream_enc_count; i++) {
+		if (pool->stream_enc[i] != stream_enc)
+			continue;
+		res_ctx->is_stream_enc_acquired[i] = acquired;
+	}
+}
+
+/* TODO: release audio object */
+/*
+ * Mark the pool slot holding @audio as acquired or released in the
+ * resource context's bookkeeping.
+ */
+void update_audio_usage(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct audio *audio,
+		bool acquired)
+{
+	int i;
+
+	for (i = 0; i < pool->audio_count; i++) {
+		if (pool->audios[i] != audio)
+			continue;
+		res_ctx->is_audio_acquired[i] = acquired;
+	}
+}
+
+/*
+ * Claim the lowest-index pipe with no stream attached and wire up the
+ * per-index hardware blocks from the pool.  Returns the pipe index, or
+ * -1 when every pipe is busy.
+ */
+static int acquire_first_free_pipe(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct dc_stream_state *stream)
+{
+	int i;
+
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+		if (pipe_ctx->stream)
+			continue;
+
+		pipe_ctx->stream_res.tg = pool->timing_generators[i];
+		pipe_ctx->plane_res.mi = pool->mis[i];
+		pipe_ctx->plane_res.hubp = pool->hubps[i];
+		pipe_ctx->plane_res.ipp = pool->ipps[i];
+		pipe_ctx->plane_res.xfm = pool->transforms[i];
+		pipe_ctx->plane_res.dpp = pool->dpps[i];
+		pipe_ctx->stream_res.opp = pool->opps[i];
+		pipe_ctx->pipe_idx = i;
+		pipe_ctx->stream = stream;
+
+		return i;
+	}
+
+	return -1;
+}
+
+/*
+ * Pick a free stream encoder for @stream's link.  Prefers the encoder
+ * whose id matches the link encoder's preferred engine; for DP signals
+ * it falls back to the last free encoder seen (MST daisy-chain case).
+ * Returns NULL when nothing suitable is free.
+ */
+static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct dc_stream_state *stream)
+{
+	int i;
+	int j = -1;
+	struct dc_link *link = stream->sink->link;
+
+	for (i = 0; i < pool->stream_enc_count; i++) {
+		if (!res_ctx->is_stream_enc_acquired[i] &&
+				pool->stream_enc[i]) {
+			/* Store first available for MST second display
+			 * in daisy chain use case */
+			j = i;
+			if (pool->stream_enc[i]->id ==
+					link->link_enc->preferred_engine)
+				return pool->stream_enc[i];
+		}
+	}
+
+	/*
+	 * below can happen in cases when stream encoder is acquired:
+	 * 1) for second MST display in chain, so preferred engine already
+	 * acquired;
+	 * 2) for another link, which preferred engine already acquired by any
+	 * MST configuration.
+	 *
+	 * If signal is of DP type and preferred engine not found, return last available
+	 *
+	 * TODO - This is just a patch up and a generic solution is
+	 * required for non DP connectors.
+	 */
+
+	if (j >= 0 && dc_is_dp_signal(stream->signal))
+		return pool->stream_enc[j];
+
+	return NULL;
+}
+
+/*
+ * Pick a free audio instance from the pool.  Returns NULL when all are
+ * acquired.
+ */
+static struct audio *find_first_free_audio(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool)
+{
+	int i;
+
+	/* Prefer a free audio whose index matches an already-acquired stream
+	 * encoder (NOTE(review): this assumes audios[i] pairs with
+	 * stream_enc[i] — confirm against the pool layout). */
+	for (i = 0; i < pool->audio_count; i++) {
+		if (!res_ctx->is_audio_acquired[i] &&
+				res_ctx->is_stream_enc_acquired[i])
+			return pool->audios[i];
+	}
+
+	/*not found the matching one, first come first serve*/
+	for (i = 0; i < pool->audio_count; i++) {
+		if (!res_ctx->is_audio_acquired[i])
+			return pool->audios[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * @stream counts as unchanged when any stream in @old_context shares
+ * its backend configuration.
+ */
+bool resource_is_stream_unchanged(
+	struct dc_state *old_context, struct dc_stream_state *stream)
+{
+	int i;
+
+	for (i = 0; i < old_context->stream_count; i++) {
+		if (are_stream_backends_same(old_context->streams[i], stream))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Append @stream to @new_ctx, take a reference on it and let the
+ * pool-specific add_stream_to_ctx callback map hardware resources.
+ *
+ * Returns DC_OK on success, DC_ERROR_UNEXPECTED when the context is
+ * already full, or the callback's error code on mapping failure.
+ */
+enum dc_status dc_add_stream_to_ctx(
+		struct dc *dc,
+		struct dc_state *new_ctx,
+		struct dc_stream_state *stream)
+{
+	struct dc_context *dc_ctx = dc->ctx;
+	enum dc_status res;
+
+	if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
+		/* Fix: the message used to read "can add stream"; it fires
+		 * exactly when the stream can NOT be added. */
+		DC_ERROR("Max streams reached, can not add stream %p !\n", stream);
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	new_ctx->streams[new_ctx->stream_count] = stream;
+	dc_stream_retain(stream);
+	new_ctx->stream_count++;
+
+	res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
+	if (res != DC_OK)
+		DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+
+	return res;
+}
+
+/*
+ * Detach @stream from @new_ctx: release its primary pipe's stream
+ * encoder, audio and clock source, drop the context's reference on the
+ * stream, and compact the stream/status arrays.
+ */
+enum dc_status dc_remove_stream_from_ctx(
+			struct dc *dc,
+			struct dc_state *new_ctx,
+			struct dc_stream_state *stream)
+{
+	int i;
+	struct dc_context *dc_ctx = dc->ctx;
+	struct pipe_ctx *del_pipe = NULL;
+
+	/* Release primary pipe */
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
+				!new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+			del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+			ASSERT(del_pipe->stream_res.stream_enc);
+			update_stream_engine_usage(
+					&new_ctx->res_ctx,
+					dc->res_pool,
+					del_pipe->stream_res.stream_enc,
+					false);
+
+			if (del_pipe->stream_res.audio)
+				update_audio_usage(
+					&new_ctx->res_ctx,
+					dc->res_pool,
+					del_pipe->stream_res.audio,
+					false);
+
+			resource_unreference_clock_source(&new_ctx->res_ctx,
+							  dc->res_pool,
+							  del_pipe->clock_source);
+
+			memset(del_pipe, 0, sizeof(*del_pipe));
+
+			break;
+		}
+	}
+
+	if (!del_pipe) {
+		DC_ERROR("Pipe not found for stream %p !\n", stream);
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	for (i = 0; i < new_ctx->stream_count; i++)
+		if (new_ctx->streams[i] == stream)
+			break;
+
+	/* Fix: the old check read streams[i] after the loop, which indexes
+	 * one slot past the last stream when @stream is absent. */
+	if (i == new_ctx->stream_count) {
+		DC_ERROR("Context doesn't have stream %p !\n", stream);
+		return DC_ERROR_UNEXPECTED;
+	}
+
+	dc_stream_release(new_ctx->streams[i]);
+	new_ctx->stream_count--;
+
+	/* Trim back arrays */
+	for (; i < new_ctx->stream_count; i++) {
+		new_ctx->streams[i] = new_ctx->streams[i + 1];
+		new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
+	}
+
+	new_ctx->streams[new_ctx->stream_count] = NULL;
+	memset(
+			&new_ctx->stream_status[new_ctx->stream_count],
+			0,
+			sizeof(new_ctx->stream_status[0]));
+
+	return DC_OK;
+}
+
+/*
+ * Copy one pipe context over another while preserving the destination's
+ * stream pointer and (when present) its plane_state pointer.
+ */
+static void copy_pipe_ctx(
+	const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
+{
+	struct dc_plane_state *kept_plane = to_pipe_ctx->plane_state;
+	struct dc_stream_state *kept_stream = to_pipe_ctx->stream;
+
+	*to_pipe_ctx = *from_pipe_ctx;
+	to_pipe_ctx->stream = kept_stream;
+	if (kept_plane != NULL)
+		to_pipe_ctx->plane_state = kept_plane;
+}
+
+/*
+ * Look for a non-DP, non-virtual stream in @context whose timing can be
+ * synchronized with @stream_needs_pll, so the two may share a PLL.
+ * Returns the first such stream or NULL.
+ */
+static struct dc_stream_state *find_pll_sharable_stream(
+		struct dc_stream_state *stream_needs_pll,
+		struct dc_state *context)
+{
+	int i;
+
+	for (i = 0; i < context->stream_count; i++) {
+		struct dc_stream_state *candidate = context->streams[i];
+
+		/* We are looking for non dp, non virtual stream */
+		if (!resource_are_streams_timing_synchronizable(
+				stream_needs_pll, candidate))
+			continue;
+		if (dc_is_dp_signal(candidate->signal))
+			continue;
+		if (candidate->sink->link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+			continue;
+
+		return candidate;
+	}
+
+	return NULL;
+}
+
+/*
+ * Normalize the pixel clock for deep-color output: scale by the
+ * bits-per-pixel ratio relative to 24bpp.  YCbCr 4:2:0 first halves the
+ * clock; YCbCr 4:2:2 is left unscaled by this function.
+ */
+static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+{
+	uint32_t pix_clk = timing->pix_clk_khz;
+	uint32_t normalized_pix_clk = pix_clk;
+
+	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		pix_clk /= 2;
+
+	if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+		switch (timing->display_color_depth) {
+		case COLOR_DEPTH_888:
+			normalized_pix_clk = pix_clk;
+			break;
+		case COLOR_DEPTH_101010:
+			normalized_pix_clk = (pix_clk * 30) / 24;
+			break;
+		case COLOR_DEPTH_121212:
+			normalized_pix_clk = (pix_clk * 36) / 24;
+			break;
+		case COLOR_DEPTH_161616:
+			normalized_pix_clk = (pix_clk * 48) / 24;
+			break;
+		default:
+			ASSERT(0);
+			break;
+		}
+	}
+
+	return normalized_pix_clk;
+}
+
+/*
+ * Record the actual (phy) pixel clock on the stream: HDMI takes the
+ * deep-color-normalized clock, everything else the raw CRTC clock.
+ */
+static void calculate_phy_pix_clks(struct dc_stream_state *stream)
+{
+	if (dc_is_hdmi_signal(stream->signal))
+		stream->phy_pix_clk = get_norm_pix_clk(&stream->timing);
+	else
+		stream->phy_pix_clk = stream->timing.pix_clk_khz;
+}
+
+/*
+ * Acquire the hardware resources backing @stream in @context: a pipe, a
+ * stream encoder and (for audio-capable signals) an audio instance, then
+ * record the resulting instances in the matching stream_status entry.
+ */
+enum dc_status resource_map_pool_resources(
+		const struct dc  *dc,
+		struct dc_state *context,
+		struct dc_stream_state *stream)
+{
+	const struct resource_pool *pool = dc->res_pool;
+	int i;
+	struct dc_context *dc_ctx = dc->ctx;
+	struct pipe_ctx *pipe_ctx = NULL;
+	int pipe_idx = -1;
+
+	/* TODO Check if this is needed */
+	/*if (!resource_is_stream_unchanged(old_context, stream)) {
+			if (stream != NULL && old_context->streams[i] != NULL) {
+				stream->bit_depth_params =
+						old_context->streams[i]->bit_depth_params;
+				stream->clamping = old_context->streams[i]->clamping;
+				continue;
+			}
+		}
+	*/
+
+	/* acquire new resources */
+	pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+	/* DCN fallback: steal a split pipe when no pipe is free */
+	if (pipe_idx < 0)
+		pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+#endif
+
+	if (pipe_idx < 0)
+		return DC_NO_CONTROLLER_RESOURCE;
+
+	pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+	pipe_ctx->stream_res.stream_enc =
+		find_first_free_match_stream_enc_for_link(
+			&context->res_ctx, pool, stream);
+
+	if (!pipe_ctx->stream_res.stream_enc)
+		return DC_NO_STREAM_ENG_RESOURCE;
+
+	update_stream_engine_usage(
+		&context->res_ctx, pool,
+		pipe_ctx->stream_res.stream_enc,
+		true);
+
+	/* TODO: Add check if ASIC support and EDID audio */
+	if (!stream->sink->converter_disable_audio &&
+	    dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+	    stream->audio_info.mode_count) {
+		pipe_ctx->stream_res.audio = find_first_free_audio(
+		&context->res_ctx, pool);
+
+		/*
+		 * Audio assigned in order first come first get.
+		 * There are asics which has number of audio
+		 * resources less then number of pipes
+		 */
+		if (pipe_ctx->stream_res.audio)
+			update_audio_usage(&context->res_ctx, pool,
+					   pipe_ctx->stream_res.audio, true);
+	}
+
+	/* Publish the acquired instances in the stream's status entry */
+	for (i = 0; i < context->stream_count; i++)
+		if (context->streams[i] == stream) {
+			context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
+			context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+			return DC_OK;
+		}
+
+	DC_ERROR("Stream %p not found in new ctx!\n", stream);
+	return DC_ERROR_UNEXPECTED;
+}
+
+/* first stream in the context is used to populate the rest */
+void validate_guaranteed_copy_streams(
+		struct dc_state *context,
+		int max_streams)
+{
+	int i;
+
+	for (i = 1; i < max_streams; i++) {
+		/* Every slot aliases stream 0 and mirrors its primary pipe */
+		context->streams[i] = context->streams[0];
+
+		copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
+			      &context->res_ctx.pipe_ctx[i]);
+		context->res_ctx.pipe_ctx[i].stream =
+				context->res_ctx.pipe_ctx[0].stream;
+
+		/* Each duplicated slot holds its own reference on the stream */
+		dc_stream_retain(context->streams[i]);
+		context->stream_count++;
+	}
+}
+
+/*
+ * Copy-construct @dst_ctx from the DC's currently active state.
+ */
+void dc_resource_state_copy_construct_current(
+		const struct dc *dc,
+		struct dc_state *dst_ctx)
+{
+	dc_resource_state_copy_construct(dc->current_state, dst_ctx);
+}
+
+
+/*
+ * Minimal state construction: only the display clock reference is taken
+ * from the resource pool here.
+ */
+void dc_resource_state_construct(
+		const struct dc *dc,
+		struct dc_state *dst_ctx)
+{
+	dst_ctx->dis_clk = dc->res_pool->display_clock;
+}
+
+/*
+ * Global validation of a proposed state: run the pool's validate_global
+ * hook, move DP streams that cannot share a PLL onto the dedicated DP
+ * clock source, rebuild scaling parameters and validate bandwidth.
+ * Returns the first failing status, or DC_OK.
+ */
+enum dc_status dc_validate_global_state(
+		struct dc *dc,
+		struct dc_state *new_ctx)
+{
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	int i, j;
+
+	if (dc->res_pool->funcs->validate_global) {
+			result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+			if (result != DC_OK)
+				return result;
+	}
+
+	for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+		struct dc_stream_state *stream = new_ctx->streams[i];
+
+		for (j = 0; j < dc->res_pool->pipe_count; j++) {
+			struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[j];
+
+			if (pipe_ctx->stream != stream)
+				continue;
+
+			/* Switch to dp clock source only if there is
+			 * no non dp stream that shares the same timing
+			 * with the dp stream.
+			 */
+			if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+				!find_pll_sharable_stream(stream, new_ctx)) {
+
+				/* Drop the old clock source before rebinding */
+				resource_unreference_clock_source(
+						&new_ctx->res_ctx,
+						dc->res_pool,
+						pipe_ctx->clock_source);
+
+				pipe_ctx->clock_source = dc->res_pool->dp_clock_source;
+				resource_reference_clock_source(
+						&new_ctx->res_ctx,
+						dc->res_pool,
+						 pipe_ctx->clock_source);
+			}
+		}
+	}
+
+	result = resource_build_scaling_params_for_context(dc, new_ctx);
+
+	if (result == DC_OK)
+		if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+			result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+	return result;
+}
+
+/*
+ * Recompute the checksum byte (sb[2]) of a valid gamut metadata packet.
+ * The sum runs over the payload starting at sb[3]; sb[1] supplies the
+ * loop bound.
+ */
+static void patch_gamut_packet_checksum(
+		struct encoder_info_packet *gamut_packet)
+{
+	uint8_t chk_sum = 0;
+	uint8_t i;
+	const uint8_t *payload;
+
+	/* Only packets marked valid carry gamut data to checksum */
+	if (!gamut_packet->valid)
+		return;
+
+	/* Start of the gamut data */
+	payload = &gamut_packet->sb[3];
+
+	for (i = 0; i <= gamut_packet->sb[1]; i++)
+		chk_sum += payload[i];
+
+	gamut_packet->sb[2] = (uint8_t) (0x100 - chk_sum);
+}
+
+/*
+ * Build the AVI (Auxiliary Video Information) InfoFrame for an HDMI
+ * stream: pixel encoding, colorimetry, scan/aspect info, content type,
+ * quantization range, VIC, bar info and the packet checksum are all
+ * derived from the stream timing, then the raw packet bytes are copied
+ * into *info_packet and it is marked valid.
+ */
+static void set_avi_info_frame(
+		struct encoder_info_packet *info_packet,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+	struct info_frame info_frame = { {0} };
+	uint32_t pixel_encoding = 0;
+	enum scanning_type scan_type = SCANNING_TYPE_NODATA;
+	enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
+	bool itc = false;
+	uint8_t itc_value = 0;
+	uint8_t cn0_cn1 = 0;
+	unsigned int cn0_cn1_value = 0;
+	uint8_t *check_sum = NULL;
+	uint8_t byte_index = 0;
+	union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
+	union display_content_support support = {0};
+	unsigned int vic = pipe_ctx->stream->timing.vic;
+	enum dc_timing_3d_format format;
+
+	/* Fall back to a default colorspace based on pixel encoding when
+	 * the stream does not specify one.
+	 */
+	color_space = pipe_ctx->stream->output_color_space;
+	if (color_space == COLOR_SPACE_UNKNOWN)
+		color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
+			COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
+
+	/* Initialize header */
+	hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
+	/* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
+	* not be used in HDMI 2.0 (Section 10.1) */
+	hdmi_info->bits.header.version = 2;
+	hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
+
+	/*
+	 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
+	 * according to HDMI 2.0 spec (Section 10.1)
+	 */
+
+	switch (stream->timing.pixel_encoding) {
+	case PIXEL_ENCODING_YCBCR422:
+		pixel_encoding = 1;
+		break;
+
+	case PIXEL_ENCODING_YCBCR444:
+		pixel_encoding = 2;
+		break;
+	case PIXEL_ENCODING_YCBCR420:
+		pixel_encoding = 3;
+		break;
+
+	case PIXEL_ENCODING_RGB:
+	default:
+		pixel_encoding = 0;
+	}
+
+	/* Y0_Y1_Y2 : The pixel encoding */
+	/* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
+	hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
+
+	/* A0 = 1 Active Format Information valid */
+	hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
+
+	/* B0, B1 = 3; Bar info data is valid */
+	hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
+
+	hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
+
+	/* S0, S1 : Underscan / Overscan */
+	/* TODO: un-hardcode scan type */
+	scan_type = SCANNING_TYPE_UNDERSCAN;
+	hdmi_info->bits.S0_S1 = scan_type;
+
+	/* C0, C1 : Colorimetry */
+	if (color_space == COLOR_SPACE_YCBCR709 ||
+			color_space == COLOR_SPACE_YCBCR709_LIMITED)
+		hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
+	else if (color_space == COLOR_SPACE_YCBCR601 ||
+			color_space == COLOR_SPACE_YCBCR601_LIMITED)
+		hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
+	else {
+		hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
+	}
+	/* BT.2020 and AdobeRGB are signalled through the extended
+	 * colorimetry field (EC0..EC2) with C0_C1 = extended.
+	 */
+	if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+			color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
+			color_space == COLOR_SPACE_2020_YCBCR) {
+		hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+		hdmi_info->bits.C0_C1   = COLORIMETRY_EXTENDED;
+	} else if (color_space == COLOR_SPACE_ADOBERGB) {
+		hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
+		hdmi_info->bits.C0_C1   = COLORIMETRY_EXTENDED;
+	}
+
+	/* TODO: un-hardcode aspect ratio */
+	aspect = stream->timing.aspect_ratio;
+
+	switch (aspect) {
+	case ASPECT_RATIO_4_3:
+	case ASPECT_RATIO_16_9:
+		hdmi_info->bits.M0_M1 = aspect;
+		break;
+
+	case ASPECT_RATIO_NO_DATA:
+	case ASPECT_RATIO_64_27:
+	case ASPECT_RATIO_256_135:
+	default:
+		hdmi_info->bits.M0_M1 = 0;
+	}
+
+	/* Active Format Aspect ratio - same as Picture Aspect Ratio. */
+	hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+
+	/* TODO: un-hardcode cn0_cn1 and itc */
+
+	cn0_cn1 = 0;
+	cn0_cn1_value = 0;
+
+	itc = true;
+	itc_value = 1;
+
+	support = stream->sink->edid_caps.content_support;
+
+	/* NOTE(review): cn0_cn1 is hard-coded to 0 above, so presumably
+	 * only the graphics branch below is reachable until the TODO is
+	 * resolved - confirm against the DISPLAY_CONTENT_TYPE_* values.
+	 */
+	if (itc) {
+		if (!support.bits.valid_content_type) {
+			cn0_cn1_value = 0;
+		} else {
+			if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GRAPHICS) {
+				if (support.bits.graphics_content == 1) {
+					cn0_cn1_value = 0;
+				}
+			} else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_PHOTO) {
+				if (support.bits.photo_content == 1) {
+					cn0_cn1_value = 1;
+				} else {
+					cn0_cn1_value = 0;
+					itc_value = 0;
+				}
+			} else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_CINEMA) {
+				if (support.bits.cinema_content == 1) {
+					cn0_cn1_value = 2;
+				} else {
+					cn0_cn1_value = 0;
+					itc_value = 0;
+				}
+			} else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GAME) {
+				if (support.bits.game_content == 1) {
+					cn0_cn1_value = 3;
+				} else {
+					cn0_cn1_value = 0;
+					itc_value = 0;
+				}
+			}
+		}
+		hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
+		hdmi_info->bits.ITC = itc_value;
+	}
+
+	/* TODO : We should handle YCC quantization */
+	/* but we do not have matrix calculation */
+	if (stream->sink->edid_caps.qs_bit == 1 &&
+			stream->sink->edid_caps.qy_bit == 1) {
+		if (color_space == COLOR_SPACE_SRGB ||
+			color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
+			hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_FULL_RANGE;
+			hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+		} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+					color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
+			hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_LIMITED_RANGE;
+			hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+		} else {
+			hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_DEFAULT_RANGE;
+			hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+		}
+	} else {
+		hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_DEFAULT_RANGE;
+		hdmi_info->bits.YQ0_YQ1   = YYC_QUANTIZATION_LIMITED_RANGE;
+	}
+
+	///VIC
+	format = stream->timing.timing_3d_format;
+	/*todo, add 3DStereo support*/
+	if (format != TIMING_3D_FORMAT_NONE) {
+		// Based on HDMI specs hdmi vic needs to be converted to cea vic when 3D is enabled
+		switch (pipe_ctx->stream->timing.hdmi_vic) {
+		case 1:
+			vic = 95;
+			break;
+		case 2:
+			vic = 94;
+			break;
+		case 3:
+			vic = 93;
+			break;
+		case 4:
+			vic = 98;
+			break;
+		default:
+			break;
+		}
+	}
+	hdmi_info->bits.VIC0_VIC7 = vic;
+
+	/* pixel repetition
+	 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
+	 * repetition start from 1 */
+	hdmi_info->bits.PR0_PR3 = 0;
+
+	/* Bar Info
+	 * barTop:    Line Number of End of Top Bar.
+	 * barBottom: Line Number of Start of Bottom Bar.
+	 * barLeft:   Pixel Number of End of Left Bar.
+	 * barRight:  Pixel Number of Start of Right Bar. */
+	/* NOTE(review): the +1 offsets assume 1-based bar coordinates -
+	 * confirm against CTA-861 bar info definition.
+	 */
+	hdmi_info->bits.bar_top = stream->timing.v_border_top;
+	hdmi_info->bits.bar_bottom = (stream->timing.v_total
+			- stream->timing.v_border_bottom + 1);
+	hdmi_info->bits.bar_left  = stream->timing.h_border_left;
+	hdmi_info->bits.bar_right = (stream->timing.h_total
+			- stream->timing.h_border_right + 1);
+
+	/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
+	check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+
+	*check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
+
+	for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
+		*check_sum += hdmi_info->packet_raw_data.sb[byte_index];
+
+	/* one byte complement */
+	*check_sum = (uint8_t) (0x100 - *check_sum);
+
+	/* Store in hw_path_mode */
+	info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
+	info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
+	info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
+
+	for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
+				info_packet_hdmi.packet_raw_data.sb); byte_index++)
+		info_packet->sb[byte_index] = info_frame.avi_info_packet.
+				info_packet_hdmi.packet_raw_data.sb[byte_index];
+
+	info_packet->valid = true;
+}
+
+/*
+ * Build the HDMI Vendor-Specific InfoFrame (VSIF).  Per HDMI 1.4a CTS
+ * the packet is only sent for 3D stereo timings or HDMI-VIC (4k) modes;
+ * for all other modes info_packet->valid stays false and nothing is
+ * emitted.
+ */
+static void set_vendor_info_packet(
+		struct encoder_info_packet *info_packet,
+		struct dc_stream_state *stream)
+{
+	uint32_t length = 0;
+	bool hdmi_vic_mode = false;
+	uint8_t checksum = 0;
+	uint32_t i = 0;
+	enum dc_timing_3d_format format;
+	// Can be different depending on packet content /*todo*/
+	// unsigned int length = pPathMode->dolbyVision ? 24 : 5;
+
+	info_packet->valid = false;
+
+	/* Ignore the timing's 3D format when the view format says 2D. */
+	format = stream->timing.timing_3d_format;
+	if (stream->view_format == VIEW_3D_FORMAT_NONE)
+		format = TIMING_3D_FORMAT_NONE;
+
+	/* Can be different depending on packet content */
+	length = 5;
+
+	if (stream->timing.hdmi_vic != 0
+			&& stream->timing.h_total >= 3840
+			&& stream->timing.v_total >= 2160)
+		hdmi_vic_mode = true;
+
+	/* According to HDMI 1.4a CTS, VSIF should be sent
+	 * for both 3D stereo and HDMI VIC modes.
+	 * For all other modes, there is no VSIF sent.  */
+
+	if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+		return;
+
+	/* 24bit IEEE Registration identifier (0x000c03). LSB first. */
+	info_packet->sb[1] = 0x03;
+	info_packet->sb[2] = 0x0C;
+	info_packet->sb[3] = 0x00;
+
+	/*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
+	 * The value for HDMI_Video_Format are:
+	 * 0x0 (0b000) - No additional HDMI video format is presented in this
+	 * packet
+	 * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
+	 * parameter follows
+	 * 0x2 (0b010) - 3D format indication present. 3D_Structure and
+	 * potentially 3D_Ext_Data follows
+	 * 0x3..0x7 (0b011..0b111) - reserved for future use */
+	if (format != TIMING_3D_FORMAT_NONE)
+		info_packet->sb[4] = (2 << 5);
+	else if (hdmi_vic_mode)
+		info_packet->sb[4] = (1 << 5);
+
+	/* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
+	 * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
+	 * The value for 3D_Structure are:
+	 * 0x0 - Frame Packing
+	 * 0x1 - Field Alternative
+	 * 0x2 - Line Alternative
+	 * 0x3 - Side-by-Side (full)
+	 * 0x4 - L + depth
+	 * 0x5 - L + depth + graphics + graphics-depth
+	 * 0x6 - Top-and-Bottom
+	 * 0x7 - Reserved for future use
+	 * 0x8 - Side-by-Side (Half)
+	 * 0x9..0xE - Reserved for future use
+	 * 0xF - Not used */
+	switch (format) {
+	case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+	case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+		info_packet->sb[5] = (0x0 << 4);
+		break;
+
+	case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+	case TIMING_3D_FORMAT_SBS_SW_PACKED:
+		info_packet->sb[5] = (0x8 << 4);
+		length = 6;
+		break;
+
+	case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+	case TIMING_3D_FORMAT_TB_SW_PACKED:
+		info_packet->sb[5] = (0x6 << 4);
+		break;
+
+	default:
+		break;
+	}
+
+	/*PB5: If PB4 is set to 0x1 (extended resolution format)
+	 * fill PB5 with the correct HDMI VIC code */
+	if (hdmi_vic_mode)
+		info_packet->sb[5] = stream->timing.hdmi_vic;
+
+	/* Header */
+	info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
+	info_packet->hb1 = 0x01; /* Version */
+
+	/* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
+	info_packet->hb2 = (uint8_t) (length);
+
+	/* Calculate checksum over header and payload; PB0 is the
+	 * one-byte complement so that all bytes sum to 0x00.
+	 */
+	checksum = 0;
+	checksum += info_packet->hb0;
+	checksum += info_packet->hb1;
+	checksum += info_packet->hb2;
+
+	for (i = 1; i <= length; i++)
+		checksum += info_packet->sb[i];
+
+	info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+	info_packet->valid = true;
+}
+
+/*
+ * Build the SPD (Source Product Descriptor) info packet used to signal
+ * FreeSync capability/state to the sink.
+ *
+ * The header layout differs between HDMI (CTA-861 InfoFrame) and DP
+ * (SDP); the FreeSync payload bytes are identical for both.  Does
+ * nothing when the stream does not support FreeSync or the signal is
+ * neither HDMI nor DP.
+ */
+static void set_spd_info_packet(
+		struct encoder_info_packet *info_packet,
+		struct dc_stream_state *stream)
+{
+	/* SPD info packet for FreeSync */
+
+	unsigned char checksum = 0;
+	unsigned int idx, payload_size = 0;
+
+	/* Check if Freesync is supported. Return if false. If true,
+	 * set the corresponding bit in the info packet
+	 */
+	if (stream->freesync_ctx.supported == false)
+		return;
+
+	if (dc_is_hdmi_signal(stream->signal)) {
+
+		/* HEADER */
+
+		/* HB0  = Packet Type = 0x83 (Source Product
+		 *	  Descriptor InfoFrame)
+		 */
+		info_packet->hb0 = HDMI_INFOFRAME_TYPE_SPD;
+
+		/* HB1  = Version = 0x01 */
+		info_packet->hb1 = 0x01;
+
+		/* HB2  = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
+		info_packet->hb2 = 0x08;
+
+		/* HDMI InfoFrame headers have no HB3; clear it so stale
+		 * data is not folded into the checksum below.
+		 */
+		info_packet->hb3 = 0x00;
+
+		payload_size = 0x08;
+
+	} else if (dc_is_dp_signal(stream->signal)) {
+
+		/* HEADER */
+
+		/* HB0  = Secondary-data Packet ID = 0 - Only non-zero
+		 *	  when used to associate audio related info packets
+		 */
+		info_packet->hb0 = 0x00;
+
+		/* HB1  = Packet Type = 0x83 (Source Product
+		 *	  Descriptor InfoFrame)
+		 */
+		info_packet->hb1 = HDMI_INFOFRAME_TYPE_SPD;
+
+		/* HB2  = [Bits 7:0 = Least significant eight bits -
+		 *	  For INFOFRAME, the value must be 1Bh]
+		 */
+		info_packet->hb2 = 0x1B;
+
+		/* HB3  = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
+		 *	  [Bits 1:0 = Most significant two bits = 0x00]
+		 */
+		info_packet->hb3 = 0x04;
+
+		payload_size = 0x1B;
+	} else {
+		/* No SPD packet defined for other signal types; bail out
+		 * before writing payload bytes against a stale header.
+		 */
+		return;
+	}
+
+	/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+	info_packet->sb[1] = 0x1A;
+
+	/* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+	info_packet->sb[2] = 0x00;
+
+	/* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+	info_packet->sb[3] = 0x00;
+
+	/* PB4 = Reserved */
+	info_packet->sb[4] = 0x00;
+
+	/* PB5 = Reserved */
+	info_packet->sb[5] = 0x00;
+
+	/* PB6 = [Bits 7:3 = Reserved] */
+	info_packet->sb[6] = 0x00;
+
+	if (stream->freesync_ctx.supported == true)
+		/* PB6 = [Bit 0 = FreeSync Supported] */
+		info_packet->sb[6] |= 0x01;
+
+	if (stream->freesync_ctx.enabled == true)
+		/* PB6 = [Bit 1 = FreeSync Enabled] */
+		info_packet->sb[6] |= 0x02;
+
+	if (stream->freesync_ctx.active == true)
+		/* PB6 = [Bit 2 = FreeSync Active] */
+		info_packet->sb[6] |= 0x04;
+
+	/* PB7 = FreeSync Minimum refresh rate (Hz) */
+	info_packet->sb[7] = (unsigned char) (stream->freesync_ctx.
+			min_refresh_in_micro_hz / 1000000);
+
+	/* PB8 = FreeSync Maximum refresh rate (Hz)
+	 *
+	 * Note: We do not use the maximum capable refresh rate
+	 * of the panel, because we should never go above the field
+	 * rate of the mode timing set.
+	 */
+	info_packet->sb[8] = (unsigned char) (stream->freesync_ctx.
+			nominal_refresh_in_micro_hz / 1000000);
+
+	/* PB9 - PB27  = Reserved */
+	for (idx = 9; idx <= 27; idx++)
+		info_packet->sb[idx] = 0x00;
+
+	/* Calculate checksum */
+	checksum += info_packet->hb0;
+	checksum += info_packet->hb1;
+	checksum += info_packet->hb2;
+	checksum += info_packet->hb3;
+
+	for (idx = 1; idx <= payload_size; idx++)
+		checksum += info_packet->sb[idx];
+
+	/* PB0 = Checksum (one byte complement) */
+	info_packet->sb[0] = (unsigned char) (0x100 - checksum);
+
+	info_packet->valid = true;
+}
+
+/*
+ * Build the HDR static metadata packet (HDMI Dynamic Range and
+ * Mastering InfoFrame / DP HDR metadata SDP) from the plane's HDR
+ * static context: EOTF flag, mastering display chromaticities,
+ * luminance limits and content light levels.
+ *
+ * Does nothing when there is no plane state or HDR is not supported.
+ * NOTE(review): for a signal that is neither HDMI nor DP, i stays 0
+ * and payload bytes are still written, though valid remains false -
+ * confirm callers only invoke this for HDMI/DP.
+ */
+static void set_hdr_static_info_packet(
+		struct encoder_info_packet *info_packet,
+		struct dc_plane_state *plane_state,
+		struct dc_stream_state *stream)
+{
+	uint16_t i = 0;
+	enum signal_type signal = stream->signal;
+	struct dc_hdr_static_metadata hdr_metadata;
+	uint32_t data;
+
+	if (!plane_state)
+		return;
+
+	hdr_metadata = plane_state->hdr_static_ctx;
+
+	if (!hdr_metadata.hdr_supported)
+		return;
+
+	/* Signal-specific header; i is set to the first payload index. */
+	if (dc_is_hdmi_signal(signal)) {
+		info_packet->valid = true;
+
+		info_packet->hb0 = 0x87;
+		info_packet->hb1 = 0x01;
+		info_packet->hb2 = 0x1A;
+		i = 1;
+	} else if (dc_is_dp_signal(signal)) {
+		info_packet->valid = true;
+
+		info_packet->hb0 = 0x00;
+		info_packet->hb1 = 0x87;
+		info_packet->hb2 = 0x1D;
+		info_packet->hb3 = (0x13 << 2);
+		i = 2;
+	}
+
+	data = hdr_metadata.is_hdr;
+	info_packet->sb[i++] = data ? 0x02 : 0x00;
+	info_packet->sb[i++] = 0x00;
+
+	/* Chromaticity coordinates, little-endian 16-bit each. */
+	data = hdr_metadata.chromaticity_green_x / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_green_y / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_blue_x / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_blue_y / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_red_x / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_red_y / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_white_point_x / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.chromaticity_white_point_y / 2;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.max_luminance;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.min_luminance;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.maximum_content_light_level;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	data = hdr_metadata.maximum_frame_average_light_level;
+	info_packet->sb[i++] = data & 0xFF;
+	info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+	/* HDMI carries a one-byte complement checksum in sb[0]; DP uses
+	 * fixed data-byte values instead.
+	 */
+	if (dc_is_hdmi_signal(signal)) {
+		uint32_t checksum = 0;
+
+		checksum += info_packet->hb0;
+		checksum += info_packet->hb1;
+		checksum += info_packet->hb2;
+
+		for (i = 1; i <= info_packet->hb2; i++)
+			checksum += info_packet->sb[i];
+
+		info_packet->sb[0] = 0x100 - checksum;
+	} else if (dc_is_dp_signal(signal)) {
+		info_packet->sb[0] = 0x01;
+		info_packet->sb[1] = 0x1A;
+	}
+}
+
+/*
+ * Build the DP VSC (Video Stream Configuration) SDP.  Currently only
+ * emitted when the link has PSR enabled (packet revision 2); otherwise
+ * the packet is left untouched.
+ */
+static void set_vsc_info_packet(
+		struct encoder_info_packet *info_packet,
+		struct dc_stream_state *stream)
+{
+	unsigned int vscPacketRevision = 0;
+	unsigned int i;
+
+	/* Revision 2 = VSC SDP supporting 3D stereo + PSR. */
+	if (stream->sink->link->psr_enabled) {
+		vscPacketRevision = 2;
+	}
+
+	/* VSC packet not needed based on the features
+	 * supported by this DP display
+	 */
+	if (vscPacketRevision == 0)
+		return;
+
+	if (vscPacketRevision == 0x2) {
+		/* Secondary-data Packet ID = 0*/
+		info_packet->hb0 = 0x00;
+		/* 07h - Packet Type Value indicating Video
+		 * Stream Configuration packet
+		 */
+		info_packet->hb1 = 0x07;
+		/* 02h = VSC SDP supporting 3D stereo and PSR
+		 * (applies to eDP v1.3 or higher).
+		 */
+		info_packet->hb2 = 0x02;
+		/* 08h = VSC packet supporting 3D stereo + PSR
+		 * (HB2 = 02h).
+		 */
+		info_packet->hb3 = 0x08;
+
+		/* Payload is all zeros for this revision. */
+		for (i = 0; i < 28; i++)
+			info_packet->sb[i] = 0;
+
+		info_packet->valid = true;
+	}
+
+	/*TODO: stereo 3D support and extend pixel encoding colorimetry*/
+}
+
+/*
+ * Drop all plane and stream references held by a dc_state and clear the
+ * stream pointers.  Only the stream/plane reference bookkeeping is
+ * touched; res_ctx pipe state is left as-is.
+ */
+void dc_resource_state_destruct(struct dc_state *context)
+{
+	int i, j;
+
+	for (i = 0; i < context->stream_count; i++) {
+		for (j = 0; j < context->stream_status[i].plane_count; j++)
+			dc_plane_state_release(
+				context->stream_status[i].plane_states[j]);
+
+		context->stream_status[i].plane_count = 0;
+		dc_stream_release(context->streams[i]);
+		context->streams[i] = NULL;
+	}
+}
+
+/*
+ * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
+ * by the src_ctx
+ */
+void dc_resource_state_copy_construct(
+		const struct dc_state *src_ctx,
+		struct dc_state *dst_ctx)
+{
+	int i, j;
+	/* Preserve the destination's own refcount across the struct copy. */
+	struct kref refcount = dst_ctx->refcount;
+
+	*dst_ctx = *src_ctx;
+
+	/* The struct copy above left top/bottom pipe pointers aimed at
+	 * src_ctx's pipe array; re-point them into dst_ctx using the
+	 * stored pipe indices.
+	 */
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
+
+		if (cur_pipe->top_pipe)
+			cur_pipe->top_pipe =  &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+		if (cur_pipe->bottom_pipe)
+			cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+	}
+
+	/* Take a reference on every stream and plane now shared with src. */
+	for (i = 0; i < dst_ctx->stream_count; i++) {
+		dc_stream_retain(dst_ctx->streams[i]);
+		for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
+			dc_plane_state_retain(
+				dst_ctx->stream_status[i].plane_states[j]);
+	}
+
+	/* context refcount should not be overridden */
+	dst_ctx->refcount = refcount;
+
+}
+
+/*
+ * Return the first clock source in the pool with no references in the
+ * given resource context, or NULL when every PLL is already in use.
+ */
+struct clock_source *dc_resource_find_first_free_pll(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool)
+{
+	int idx = 0;
+
+	while (idx < pool->clk_src_count) {
+		if (!res_ctx->clock_source_ref_count[idx])
+			return pool->clock_sources[idx];
+		++idx;
+	}
+
+	return NULL;
+}
+
+/*
+ * Populate the pipe's encoder info-frame set for the stream's signal
+ * type.  All packets are first invalidated; HDMI streams get AVI,
+ * vendor, SPD and HDR packets, DP streams get VSC, SPD and HDR packets.
+ */
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
+{
+	enum signal_type signal = SIGNAL_TYPE_NONE;
+	struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+	/* default all packets to invalid */
+	info->avi.valid = false;
+	info->gamut.valid = false;
+	info->vendor.valid = false;
+	info->spd.valid = false;
+	info->hdrsmd.valid = false;
+	info->vsc.valid = false;
+
+	signal = pipe_ctx->stream->signal;
+
+	/* HDMI and DP have different info packets*/
+	if (dc_is_hdmi_signal(signal)) {
+		set_avi_info_frame(&info->avi, pipe_ctx);
+
+		set_vendor_info_packet(&info->vendor, pipe_ctx->stream);
+
+		set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+		set_hdr_static_info_packet(&info->hdrsmd,
+				pipe_ctx->plane_state, pipe_ctx->stream);
+
+	} else if (dc_is_dp_signal(signal)) {
+		set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
+
+		set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+		set_hdr_static_info_packet(&info->hdrsmd,
+				pipe_ctx->plane_state, pipe_ctx->stream);
+	}
+
+	/* Gamut packet was invalidated above, so this is a no-op unless a
+	 * future caller marks it valid before this point.
+	 */
+	patch_gamut_packet_checksum(&info->gamut);
+}
+
+/*
+ * Assign a clock source to the head pipe of the given stream.
+ *
+ * DP and virtual streams use the shared DP clock source; other signals
+ * try to share an already-used PLL (unless PLL sharing is disabled) and
+ * fall back to the first free PLL in the pool.  The chosen source is
+ * reference-counted in the context.
+ *
+ * Returns DC_OK on success, DC_ERROR_UNEXPECTED when the stream has no
+ * head pipe, or DC_NO_CLOCK_SOURCE_RESOURCE when no PLL is available.
+ */
+enum dc_status resource_map_clock_resources(
+		const struct dc  *dc,
+		struct dc_state *context,
+		struct dc_stream_state *stream)
+{
+	/* acquire new resources */
+	const struct resource_pool *pool = dc->res_pool;
+	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+				&context->res_ctx, stream);
+
+	if (!pipe_ctx)
+		return DC_ERROR_UNEXPECTED;
+
+	if (dc_is_dp_signal(pipe_ctx->stream->signal)
+		|| pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+		pipe_ctx->clock_source = pool->dp_clock_source;
+	else {
+		pipe_ctx->clock_source = NULL;
+
+		if (!dc->config.disable_disp_pll_sharing)
+			pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing(
+				&context->res_ctx,
+				pipe_ctx);
+
+		if (pipe_ctx->clock_source == NULL)
+			pipe_ctx->clock_source =
+				dc_resource_find_first_free_pll(
+					&context->res_ctx,
+					pool);
+	}
+
+	if (pipe_ctx->clock_source == NULL)
+		return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+	resource_reference_clock_source(
+		&context->res_ctx, pool,
+		pipe_ctx->clock_source);
+
+	return DC_OK;
+}
+
+/*
+ * Note: We need to disable output if clock sources change,
+ * since bios does optimization and doesn't apply if changing
+ * PHY when not already disabled.
+ */
+/*
+ * Return true when the pipe's sink, signal, audio, clock source,
+ * stream encoder or timing differs between the old and new context,
+ * i.e. the pipe must be torn down and reprogrammed rather than
+ * updated in place.  A pipe with no old stream never needs it.
+ */
+bool pipe_need_reprogram(
+		struct pipe_ctx *pipe_ctx_old,
+		struct pipe_ctx *pipe_ctx)
+{
+	if (!pipe_ctx_old->stream)
+		return false;
+
+	if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
+		return true;
+
+	if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
+		return true;
+
+	if (pipe_ctx_old->stream_res.audio != pipe_ctx->stream_res.audio)
+		return true;
+
+	/* Clock source change only forces reprogramming for a different
+	 * stream; the same stream moving clock sources is handled
+	 * elsewhere.
+	 */
+	if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
+			&& pipe_ctx_old->stream != pipe_ctx->stream)
+		return true;
+
+	if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
+		return true;
+
+	if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+		return true;
+
+
+	return false;
+}
+
+/*
+ * Translate the stream's dither option into formatter bit-depth
+ * reduction parameters: truncation, spatial dither and temporal
+ * (frame modulation) dither settings.  DITHER_OPTION_DEFAULT is first
+ * resolved to a spatial option based on the display color depth;
+ * DITHER_OPTION_DISABLE leaves *fmt_bit_depth zeroed.
+ */
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+		struct bit_depth_reduction_params *fmt_bit_depth)
+{
+	enum dc_dither_option option = stream->dither_option;
+	enum dc_pixel_encoding pixel_encoding =
+			stream->timing.pixel_encoding;
+
+	memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+	if (option == DITHER_OPTION_DEFAULT) {
+		switch (stream->timing.display_color_depth) {
+		case COLOR_DEPTH_666:
+			option = DITHER_OPTION_SPATIAL6;
+			break;
+		case COLOR_DEPTH_888:
+			option = DITHER_OPTION_SPATIAL8;
+			break;
+		case COLOR_DEPTH_101010:
+			option = DITHER_OPTION_SPATIAL10;
+			break;
+		default:
+			option = DITHER_OPTION_DISABLE;
+		}
+	}
+
+	if (option == DITHER_OPTION_DISABLE)
+		return;
+
+	/* Truncation: TRUNCATE_DEPTH 0/1/2 selects 6/8/10-bit targets. */
+	if (option == DITHER_OPTION_TRUN6) {
+		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+		fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+	} else if (option == DITHER_OPTION_TRUN8 ||
+			option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+			option == DITHER_OPTION_TRUN8_FM6) {
+		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+		fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+	} else if (option == DITHER_OPTION_TRUN10        ||
+			option == DITHER_OPTION_TRUN10_SPATIAL6   ||
+			option == DITHER_OPTION_TRUN10_SPATIAL8   ||
+			option == DITHER_OPTION_TRUN10_FM8     ||
+			option == DITHER_OPTION_TRUN10_FM6     ||
+			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+		fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+	}
+
+	/* special case - Formatter can only reduce by 4 bits at most.
+	 * When reducing from 12 to 6 bits,
+	 * HW recommends we use trunc with round mode
+	 * (if we did nothing, trunc to 10 bits would be used)
+	 * note that any 12->10 bit reduction is ignored prior to DCE8,
+	 * as the input was 10 bits.
+	 */
+	if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+			option == DITHER_OPTION_SPATIAL6 ||
+			option == DITHER_OPTION_FM6) {
+		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+		fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+		fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+	}
+
+	/* spatial dither
+	 * note that spatial modes 1-3 are never used
+	 */
+	if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM            ||
+			option == DITHER_OPTION_SPATIAL6 ||
+			option == DITHER_OPTION_TRUN10_SPATIAL6      ||
+			option == DITHER_OPTION_TRUN8_SPATIAL6) {
+		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+		fmt_bit_depth->flags.RGB_RANDOM =
+				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+	} else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM            ||
+			option == DITHER_OPTION_SPATIAL8 ||
+			option == DITHER_OPTION_SPATIAL8_FM6        ||
+			option == DITHER_OPTION_TRUN10_SPATIAL8      ||
+			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+		fmt_bit_depth->flags.RGB_RANDOM =
+				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+	} else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+			option == DITHER_OPTION_SPATIAL10 ||
+			option == DITHER_OPTION_SPATIAL10_FM8 ||
+			option == DITHER_OPTION_SPATIAL10_FM6) {
+		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+		fmt_bit_depth->flags.RGB_RANDOM =
+				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+	}
+
+	/* Plain spatial options use a fixed pattern; all others randomize
+	 * per frame.
+	 */
+	if (option == DITHER_OPTION_SPATIAL6 ||
+			option == DITHER_OPTION_SPATIAL8 ||
+			option == DITHER_OPTION_SPATIAL10) {
+		fmt_bit_depth->flags.FRAME_RANDOM = 0;
+	} else {
+		fmt_bit_depth->flags.FRAME_RANDOM = 1;
+	}
+
+	//////////////////////
+	//// temporal dither
+	//////////////////////
+	if (option == DITHER_OPTION_FM6           ||
+			option == DITHER_OPTION_SPATIAL8_FM6     ||
+			option == DITHER_OPTION_SPATIAL10_FM6     ||
+			option == DITHER_OPTION_TRUN10_FM6     ||
+			option == DITHER_OPTION_TRUN8_FM6      ||
+			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+	} else if (option == DITHER_OPTION_FM8        ||
+			option == DITHER_OPTION_SPATIAL10_FM8  ||
+			option == DITHER_OPTION_TRUN10_FM8) {
+		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+	} else if (option == DITHER_OPTION_FM10) {
+		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+	}
+
+	fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
+
+/*
+ * Validate a stream against the first timing generator, the link
+ * encoder and the link's mode-timing limits.
+ *
+ * Returns DC_OK on success, or the first failing stage's status
+ * (DC_FAIL_CONTROLLER_VALIDATE / DC_FAIL_ENC_VALIDATE / the link
+ * validation result).
+ */
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+{
+	struct dc_link *link = stream->sink->link;
+	struct timing_generator *tg = dc->res_pool->timing_generators[0];
+	enum dc_status res = DC_OK;
+
+	calculate_phy_pix_clks(stream);
+
+	if (!tg->funcs->validate_timing(tg, &stream->timing))
+		res = DC_FAIL_CONTROLLER_VALIDATE;
+
+	if (res == DC_OK)
+		if (!link->link_enc->funcs->validate_output_with_stream(
+						link->link_enc, stream))
+			res = DC_FAIL_ENC_VALIDATE;
+
+	/* TODO: validate audio ASIC caps, encoder */
+
+	if (res == DC_OK)
+		res = dc_link_validate_mode_timing(stream,
+		      link,
+		      &stream->timing);
+
+	return res;
+}
+
+/*
+ * Validate a plane state against the resource pool's capabilities.
+ * Delegates to the pool's validate_plane hook when one exists;
+ * otherwise nothing is checked and DC_OK is returned.
+ */
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state)
+{
+	/* TODO For now validates pixel format only */
+	if (dc->res_pool->funcs->validate_plane)
+		return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
+
+	return DC_OK;
+}

+ 104 - 0
drivers/gpu/drm/amd/display/dc/core/dc_sink.c

@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+/* Release sink-owned allocations; kfree(NULL) is a no-op so no guard
+ * is needed.  The pointer is cleared to defend against double free.
+ */
+static void destruct(struct dc_sink *sink)
+{
+	kfree(sink->dc_container_id);
+	sink->dc_container_id = NULL;
+}
+
+/*
+ * Initialize a freshly allocated dc_sink from init_params.
+ * Returns false (leaving the sink untouched) when no link is supplied.
+ */
+static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+{
+
+	struct dc_link *link = init_params->link;
+
+	if (!link)
+		return false;
+
+	sink->sink_signal = init_params->sink_signal;
+	sink->link = link;
+	sink->ctx = link->ctx;
+	sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+	sink->converter_disable_audio = init_params->converter_disable_audio;
+	sink->dc_container_id = NULL;
+
+	return true;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+/* Take an additional reference on the sink. */
+void dc_sink_retain(struct dc_sink *sink)
+{
+	kref_get(&sink->refcount);
+}
+
+/* kref release callback: tear down and free the sink. */
+static void dc_sink_free(struct kref *kref)
+{
+	struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
+	destruct(sink);
+	kfree(sink);
+}
+
+/* Drop a reference; frees the sink when the count hits zero. */
+void dc_sink_release(struct dc_sink *sink)
+{
+	kref_put(&sink->refcount, dc_sink_free);
+}
+
+/*
+ * Allocate and initialize a dc_sink holding a single reference.
+ * Returns NULL on allocation or initialization failure.
+ */
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
+{
+	struct dc_sink *sink = kzalloc(sizeof(*sink), GFP_KERNEL);
+
+	if (!sink)
+		return NULL;
+
+	if (!construct(sink, init_params)) {
+		kfree(sink);
+		return NULL;
+	}
+
+	kref_init(&sink->refcount);
+
+	return sink;
+}
+
+/*******************************************************************************
+ * Protected functions - visible only inside of DC (not visible in DM)
+ ******************************************************************************/

+ 398 - 0
drivers/gpu/drm/amd/display/dc/core/dc_stream.c

@@ -0,0 +1,398 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "resource.h"
+#include "ipp.h"
+#include "timing_generator.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
+/* Derive stream->signal. Priority: an explicit output_signal override wins;
+ * otherwise the sink's own signal; otherwise fall back to the connector
+ * signal. For DVI, additionally choose single vs dual link from the pixel
+ * clock (dual link above the single-link TMDS limit, unless the sink is
+ * known single-link only). */
+static void update_stream_signal(struct dc_stream_state *stream)
+{
+	if (stream->output_signal == SIGNAL_TYPE_NONE) {
+		struct dc_sink *dc_sink = stream->sink;
+
+		if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
+			stream->signal = stream->sink->link->connector_signal;
+		else
+			stream->signal = dc_sink->sink_signal;
+	} else {
+		stream->signal = stream->output_signal;
+	}
+
+	if (dc_is_dvi_signal(stream->signal)) {
+		/* pix clock beyond the single-link limit needs dual link,
+		 * but never force dual link on a single-link-only sink */
+		if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
+			stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		else
+			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+}
+
+/* Initialize @stream from @dc_sink_data: bind the sink (taking a reference
+ * on it), translate the sink's EDID-derived audio capabilities into the
+ * stream's audio_info, carry over HDMI 2.0 scramble capability, and derive
+ * the stream signal. The matching sink release happens in destruct(). */
+static void construct(struct dc_stream_state *stream,
+	struct dc_sink *dc_sink_data)
+{
+	uint32_t i = 0;
+
+	stream->sink = dc_sink_data;
+	stream->ctx = stream->sink->ctx;
+
+	/* stream holds a reference on the sink for its whole lifetime */
+	dc_sink_retain(dc_sink_data);
+
+	/* Copy audio modes */
+	/* TODO - Remove this translation */
+	for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
+	{
+		stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
+		stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
+		stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
+		stream->audio_info.modes[i].sample_size = dc_sink_data->edid_caps.audio_modes[i].sample_size;
+	}
+	stream->audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
+	stream->audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
+	stream->audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
+	memmove(
+		stream->audio_info.display_name,
+		dc_sink_data->edid_caps.display_name,
+		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+	stream->audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
+	stream->audio_info.product_id = dc_sink_data->edid_caps.product_id;
+	stream->audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
+
+	if (dc_sink_data->dc_container_id != NULL) {
+		struct dc_container_id *dc_container_id = dc_sink_data->dc_container_id;
+
+		stream->audio_info.port_id[0] = dc_container_id->portId[0];
+		stream->audio_info.port_id[1] = dc_container_id->portId[1];
+	} else {
+		/* TODO - WindowDM has implemented,
+		other DMs need Unhardcode port_id */
+		stream->audio_info.port_id[0] = 0x5558859e;
+		stream->audio_info.port_id[1] = 0xd989449;
+	}
+
+	/* EDID CAP translation for HDMI 2.0 */
+	stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+
+	stream->status.link = stream->sink->link;
+
+	update_stream_signal(stream);
+}
+
+/* Release references the stream holds: the sink taken in construct() and,
+ * if present, the output transfer function. */
+static void destruct(struct dc_stream_state *stream)
+{
+	dc_sink_release(stream->sink);
+	if (stream->out_transfer_func != NULL) {
+		dc_transfer_func_release(
+				stream->out_transfer_func);
+		stream->out_transfer_func = NULL;
+	}
+}
+
+/* Take an additional reference on @stream. Paired with dc_stream_release(). */
+void dc_stream_retain(struct dc_stream_state *stream)
+{
+	kref_get(&stream->refcount);
+}
+
+/* kref release callback: runs when the last stream reference is dropped. */
+static void dc_stream_free(struct kref *kref)
+{
+	struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
+
+	destruct(stream);
+	kfree(stream);
+}
+
+/* Drop a reference on @stream; frees it when the last reference goes away.
+ * Tolerates a NULL @stream. */
+void dc_stream_release(struct dc_stream_state *stream)
+{
+	if (stream == NULL)
+		return;
+
+	kref_put(&stream->refcount, dc_stream_free);
+}
+
+/* Create a stream bound to @sink (takes a reference on the sink).
+ * Returns NULL when @sink is NULL or allocation fails; the caller owns
+ * the initial reference and releases it with dc_stream_release(). */
+struct dc_stream_state *dc_create_stream_for_sink(
+		struct dc_sink *sink)
+{
+	struct dc_stream_state *stream;
+
+	if (!sink)
+		return NULL;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return NULL;
+
+	construct(stream, sink);
+	kref_init(&stream->refcount);
+
+	return stream;
+}
+
+/* Look up the status entry for @stream in the DC's current state.
+ * Returns NULL if the stream is not part of the current state. */
+struct dc_stream_status *dc_stream_get_status(
+	struct dc_stream_state *stream)
+{
+	uint8_t i;
+	struct dc  *dc = stream->ctx->dc;
+
+	for (i = 0; i < dc->current_state->stream_count; i++) {
+		if (stream == dc->current_state->streams[i])
+			return &dc->current_state->stream_status[i];
+	}
+
+	return NULL;
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address.
+ *
+ * Programs the attributes on every pipe currently driving @stream through
+ * whichever cursor-capable block (ipp/hubp/mi/xfm/dpp) the pipe provides,
+ * then caches them in stream->cursor_attributes.
+ *
+ * Returns false on invalid arguments (NULL stream/attributes or a zero
+ * cursor surface address), true otherwise.
+ */
+bool dc_stream_set_cursor_attributes(
+	struct dc_stream_state *stream,
+	const struct dc_cursor_attributes *attributes)
+{
+	int i;
+	struct dc  *core_dc;
+	struct resource_context *res_ctx;
+
+	if (NULL == stream) {
+		dm_error("DC: dc_stream is NULL!\n");
+		return false;
+	}
+	if (NULL == attributes) {
+		dm_error("DC: attributes is NULL!\n");
+		return false;
+	}
+
+	if (attributes->address.quad_part == 0) {
+		dm_output_to_console("DC: Cursor address is 0!\n");
+		return false;
+	}
+
+	core_dc = stream->ctx->dc;
+	res_ctx = &core_dc->current_state->res_ctx;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+			continue;
+		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+			continue;
+
+		/* Fix: check ipp for NULL before dereferencing its funcs,
+		 * matching the NULL checks on hubp/mi/xfm/dpp below — not
+		 * every pipe configuration provides an ipp. */
+		if (pipe_ctx->plane_res.ipp != NULL &&
+				pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
+			pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+						pipe_ctx->plane_res.ipp, attributes);
+
+		if (pipe_ctx->plane_res.hubp != NULL &&
+				pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
+			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+					pipe_ctx->plane_res.hubp, attributes);
+
+		if (pipe_ctx->plane_res.mi != NULL &&
+				pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
+			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+					pipe_ctx->plane_res.mi, attributes);
+
+
+		if (pipe_ctx->plane_res.xfm != NULL &&
+				pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
+			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+				pipe_ctx->plane_res.xfm, attributes);
+
+		if (pipe_ctx->plane_res.dpp != NULL &&
+				pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
+			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+				pipe_ctx->plane_res.dpp, attributes);
+	}
+
+	stream->cursor_attributes = *attributes;
+
+	return true;
+}
+
+/* Program the cursor position on every pipe driving @stream. The position
+ * is force-disabled for video-progressive planes and for non-top slave
+ * pipes. Returns false on NULL arguments, true otherwise. */
+bool dc_stream_set_cursor_position(
+	struct dc_stream_state *stream,
+	const struct dc_cursor_position *position)
+{
+	int i;
+	struct dc  *core_dc;
+	struct resource_context *res_ctx;
+
+	if (NULL == stream) {
+		dm_error("DC: dc_stream is NULL!\n");
+		return false;
+	}
+
+	if (NULL == position) {
+		dm_error("DC: cursor position is NULL!\n");
+		return false;
+	}
+
+	core_dc = stream->ctx->dc;
+	res_ctx = &core_dc->current_state->res_ctx;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+		struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+		struct mem_input *mi = pipe_ctx->plane_res.mi;
+		struct hubp *hubp = pipe_ctx->plane_res.hubp;
+		struct transform *xfm = pipe_ctx->plane_res.xfm;
+		struct dpp *dpp = pipe_ctx->plane_res.dpp;
+		struct dc_cursor_position pos_cpy = *position;
+		struct dc_cursor_mi_param param = {
+			.pixel_clk_khz = stream->timing.pix_clk_khz,
+			.ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
+			.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+			.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+			.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+		};
+
+		/* the pipe filter only requires (mi || hubp), so hubp may
+		 * legitimately be NULL on this pipe */
+		if (pipe_ctx->stream != stream ||
+				(!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
+				!pipe_ctx->plane_state ||
+				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+			continue;
+
+		if (pipe_ctx->plane_state->address.type
+				== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+			pos_cpy.enable = false;
+
+		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+			pos_cpy.enable = false;
+
+
+		if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
+			ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+
+		if (mi != NULL && mi->funcs->set_cursor_position != NULL)
+			mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+
+		if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
+			hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+
+		/* Fix: hubp can be NULL here (see filter above) — don't
+		 * dereference it unconditionally for the cursor width. */
+		if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
+			xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param,
+					hubp ? hubp->curs_attr.width : 0);
+
+		if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
+			dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param,
+					hubp ? hubp->curs_attr.width : 0);
+
+	}
+
+	return true;
+}
+
+/* Return the frame counter of the timing generator driving @stream,
+ * or 0 when the stream is not found in the current resource context. */
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+{
+	uint8_t i;
+	struct dc  *core_dc = stream->ctx->dc;
+	struct resource_context *res_ctx =
+		&core_dc->current_state->res_ctx;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+		if (res_ctx->pipe_ctx[i].stream != stream)
+			continue;
+
+		return tg->funcs->get_frame_count(tg);
+	}
+
+	return 0;
+}
+
+/* Query the current scanout position (vblank window and h/v position)
+ * from the timing generator driving @stream. Output parameters are only
+ * written when a matching pipe is found; returns true in that case. */
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+				  uint32_t *v_blank_start,
+				  uint32_t *v_blank_end,
+				  uint32_t *h_position,
+				  uint32_t *v_position)
+{
+	uint8_t i;
+	bool ret = false;
+	struct dc  *core_dc = stream->ctx->dc;
+	struct resource_context *res_ctx =
+		&core_dc->current_state->res_ctx;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+		if (res_ctx->pipe_ctx[i].stream != stream)
+			continue;
+
+		tg->funcs->get_scanoutpos(tg,
+					  v_blank_start,
+					  v_blank_end,
+					  h_position,
+					  v_position);
+
+		ret = true;
+		break;
+	}
+
+	return ret;
+}
+
+
+/* Dump the stream's src/dst rects, timing, sink identity and link index
+ * to the DM logger at @log_type verbosity. */
+void dc_stream_log(
+	const struct dc_stream_state *stream,
+	struct dal_logger *dm_logger,
+	enum dc_log_type log_type)
+{
+
+	/* Fix: a pointer must be printed with %p — %x truncates on 64-bit
+	 * and mismatched format/argument types are undefined behavior. */
+	dm_logger_write(dm_logger,
+			log_type,
+			"core_stream %p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+			stream,
+			stream->src.x,
+			stream->src.y,
+			stream->src.width,
+			stream->src.height,
+			stream->dst.x,
+			stream->dst.y,
+			stream->dst.width,
+			stream->dst.height,
+			stream->output_color_space);
+	dm_logger_write(dm_logger,
+			log_type,
+			"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+			stream->timing.pix_clk_khz,
+			stream->timing.h_total,
+			stream->timing.v_total,
+			stream->timing.pixel_encoding,
+			stream->timing.display_color_depth);
+	dm_logger_write(dm_logger,
+			log_type,
+			"\tsink name: %s, serial: %d\n",
+			stream->sink->edid_caps.display_name,
+			stream->sink->edid_caps.serial_number);
+	dm_logger_write(dm_logger,
+			log_type,
+			"\tlink: %d\n",
+			stream->sink->link->link_index);
+}

+ 193 - 0
drivers/gpu/drm/amd/display/dc/core/dc_surface.c

@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* DC interface (public) */
+#include "dm_services.h"
+#include "dc.h"
+
+/* DC core (private) */
+#include "core_types.h"
+#include "transform.h"
+#include "dpp.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+/* Minimal init: bind the (zero-initialized) plane state to its DC context. */
+static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+{
+	plane_state->ctx = ctx;
+}
+
+/* Drop the references the plane state holds on its gamma correction and
+ * input transfer function, if set. */
+static void destruct(struct dc_plane_state *plane_state)
+{
+	if (plane_state->gamma_correction != NULL)
+		dc_gamma_release(&plane_state->gamma_correction);
+
+	if (plane_state->in_transfer_func != NULL) {
+		dc_transfer_func_release(plane_state->in_transfer_func);
+		plane_state->in_transfer_func = NULL;
+	}
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+/* Map @controller_id (1-based) to the corresponding pageflip IRQ source
+ * for this plane. NOTE(review): interrupt registration itself appears to
+ * be handled elsewhere — the call is still commented out below. */
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+		uint32_t controller_id)
+{
+	plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
+	/*register_flip_interrupt(surface);*/
+}
+
+/* Allocate a zero-initialized, refcounted plane state bound to @dc's
+ * context. Returns NULL on allocation failure; the caller owns the
+ * initial reference (dc_plane_state_release()). */
+struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+{
+	struct dc_plane_state *plane_state;
+
+	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	if (plane_state == NULL)
+		return NULL;
+
+	kref_init(&plane_state->refcount);
+	construct(dc->ctx, plane_state);
+
+	return plane_state;
+}
+
+/* Return the plane's status block, first refreshing its pending-flip
+ * state for every pipe that currently uses this plane. Returns NULL on
+ * invalid input or when there is no current state yet. */
+const struct dc_plane_status *dc_plane_get_status(
+		const struct dc_plane_state *plane_state)
+{
+	const struct dc_plane_status *plane_status;
+	struct dc  *core_dc;
+	int i;
+
+	if (!plane_state ||
+		!plane_state->ctx ||
+		!plane_state->ctx->dc) {
+		ASSERT(0);
+		return NULL; /* remove this if above assert never hit */
+	}
+
+	plane_status = &plane_state->status;
+	core_dc = plane_state->ctx->dc;
+
+	if (core_dc->current_state == NULL)
+		return NULL;
+
+	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx =
+				&core_dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (pipe_ctx->plane_state != plane_state)
+			continue;
+
+		/* refresh requested/current address + pending-flip flag */
+		core_dc->hwss.update_pending_status(pipe_ctx);
+	}
+
+	return plane_status;
+}
+
+/* Take an additional reference on @plane_state. */
+void dc_plane_state_retain(struct dc_plane_state *plane_state)
+{
+	kref_get(&plane_state->refcount);
+}
+
+/* kref release callback: frees the plane state and its held references. */
+static void dc_plane_state_free(struct kref *kref)
+{
+	struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
+	destruct(plane_state);
+	kfree(plane_state);
+}
+
+/* Drop a reference on @plane_state; frees it at refcount 0. */
+void dc_plane_state_release(struct dc_plane_state *plane_state)
+{
+	kref_put(&plane_state->refcount, dc_plane_state_free);
+}
+
+/* Take an additional reference on @gamma. */
+void dc_gamma_retain(struct dc_gamma *gamma)
+{
+	kref_get(&gamma->refcount);
+}
+
+/* kref release callback for gamma objects. */
+static void dc_gamma_free(struct kref *kref)
+{
+	struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
+	kfree(gamma);
+}
+
+/* Drop a reference on *@gamma and NULL the caller's pointer so it cannot
+ * be released twice. @gamma and *@gamma must be non-NULL. */
+void dc_gamma_release(struct dc_gamma **gamma)
+{
+	kref_put(&(*gamma)->refcount, dc_gamma_free);
+	*gamma = NULL;
+}
+
+/* Allocate a zero-initialized, refcounted gamma object; NULL on failure.
+ * Caller owns the initial reference (dc_gamma_release()). */
+struct dc_gamma *dc_create_gamma(void)
+{
+	struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+
+	if (gamma == NULL)
+		return NULL;
+
+	kref_init(&gamma->refcount);
+
+	return gamma;
+}
+
+/* Take an additional reference on @tf. */
+void dc_transfer_func_retain(struct dc_transfer_func *tf)
+{
+	kref_get(&tf->refcount);
+}
+
+/* kref release callback for transfer functions. */
+static void dc_transfer_func_free(struct kref *kref)
+{
+	struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
+	kfree(tf);
+}
+
+/* Drop a reference on @tf; frees it at refcount 0. @tf must be non-NULL. */
+void dc_transfer_func_release(struct dc_transfer_func *tf)
+{
+	kref_put(&tf->refcount, dc_transfer_func_free);
+}
+
+/* Allocate a zero-initialized, refcounted transfer function; NULL on
+ * failure. Caller owns the initial reference (dc_transfer_func_release()). */
+struct dc_transfer_func *dc_create_transfer_func(void)
+{
+	struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+
+	if (tf == NULL)
+		return NULL;
+
+	kref_init(&tf->refcount);
+
+	return tf;
+}
+
+

+ 1103 - 0
drivers/gpu/drm/amd/display/dc/dc.h

@@ -0,0 +1,1103 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INTERFACE_H_
+#define DC_INTERFACE_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+#include "logger_types.h"
+#include "gpio_types.h"
+#include "link_service_types.h"
+#include "grph_object_ctrl_defs.h"
+#include <inc/hw/opp.h>
+
+#include "inc/hw_sequencer.h"
+#include "inc/compressor.h"
+#include "dml/display_mode_lib.h"
+
+#define DC_VER "3.1.07"
+
+#define MAX_SURFACES 3
+#define MAX_STREAMS 6
+#define MAX_SINKS_PER_LINK 4
+
+
+/*******************************************************************************
+ * Display Core Interfaces
+ ******************************************************************************/
+/* Hardware/driver capability limits reported by DC to the DM layer. */
+struct dc_caps {
+	uint32_t max_streams;
+	uint32_t max_links;
+	uint32_t max_audios;
+	uint32_t max_slave_planes;
+	uint32_t max_planes;
+	uint32_t max_downscale_ratio;
+	uint32_t i2c_speed_in_khz;
+	unsigned int max_cursor_size;
+	unsigned int max_video_width;
+	bool dcc_const_color;
+	bool dynamic_audio;
+};
+
+/* Input to a DCC (delta color compression) capability query. */
+struct dc_dcc_surface_param {
+	struct dc_size surface_size;
+	enum surface_pixel_format format;
+	enum swizzle_mode_values swizzle_mode;
+	enum dc_scan_direction scan;
+};
+
+struct dc_dcc_setting {
+	unsigned int max_compressed_blk_size;
+	unsigned int max_uncompressed_blk_size;
+	bool independent_64b_blks;
+};
+
+/* DCC capability result: either one setting for graphics surfaces or
+ * separate luma/chroma settings for video surfaces. */
+struct dc_surface_dcc_cap {
+	union {
+		struct {
+			struct dc_dcc_setting rgb;
+		} grph;
+
+		struct {
+			struct dc_dcc_setting luma;
+			struct dc_dcc_setting chroma;
+		} video;
+	};
+
+	bool capable;
+	bool const_color_support;
+};
+
+/* Which update kinds count as "activity" for static-screen detection. */
+struct dc_static_screen_events {
+	bool cursor_update;
+	bool surface_update;
+	bool overlay_update;
+};
+
+/* Forward declarations */
+struct dc;
+struct dc_plane_state;
+struct dc_state;
+
+struct dc_cap_funcs {
+	bool (*get_dcc_compression_cap)(const struct dc *dc,
+			const struct dc_dcc_surface_param *input,
+			struct dc_surface_dcc_cap *output);
+};
+
+/* Per-stream control entry points exposed by DC to the DM layer. */
+struct dc_stream_state_funcs {
+	bool (*adjust_vmin_vmax)(struct dc *dc,
+			struct dc_stream_state **stream,
+			int num_streams,
+			int vmin,
+			int vmax);
+	bool (*get_crtc_position)(struct dc *dc,
+			struct dc_stream_state **stream,
+			int num_streams,
+			unsigned int *v_pos,
+			unsigned int *nom_v_pos);
+
+	bool (*set_gamut_remap)(struct dc *dc,
+			const struct dc_stream_state *stream);
+
+	bool (*program_csc_matrix)(struct dc *dc,
+			struct dc_stream_state *stream);
+
+	void (*set_static_screen_events)(struct dc *dc,
+			struct dc_stream_state **stream,
+			int num_streams,
+			const struct dc_static_screen_events *events);
+
+	void (*set_dither_option)(struct dc_stream_state *stream,
+			enum dc_dither_option option);
+
+	void (*set_dpms)(struct dc *dc,
+			struct dc_stream_state *stream,
+			bool dpms_off);
+};
+
+struct link_training_settings;
+
+/* Per-link control entry points (training, HPD, test patterns). */
+struct dc_link_funcs {
+	void (*set_drive_settings)(struct dc *dc,
+			struct link_training_settings *lt_settings,
+			const struct dc_link *link);
+	void (*perform_link_training)(struct dc *dc,
+			struct dc_link_settings *link_setting,
+			bool skip_video_pattern);
+	void (*set_preferred_link_settings)(struct dc *dc,
+			struct dc_link_settings *link_setting,
+			struct dc_link *link);
+	void (*enable_hpd)(const struct dc_link *link);
+	void (*disable_hpd)(const struct dc_link *link);
+	void (*set_test_pattern)(
+			struct dc_link *link,
+			enum dp_test_pattern test_pattern,
+			const struct link_training_settings *p_link_settings,
+			const unsigned char *p_custom_pattern,
+			unsigned int cust_pattern_size);
+};
+
+/* Structure to hold configuration flags set by dm at dc creation. */
+struct dc_config {
+	bool gpu_vm_support;
+	bool disable_disp_pll_sharing;
+};
+
+/* NOTE(review): "DISALBE" misspelling kept — renaming the constant would
+ * break existing users. */
+enum dcc_option {
+	DCC_ENABLE = 0,
+	DCC_DISABLE = 1,
+	DCC_HALF_REQ_DISALBE = 2,
+};
+
+/* MPC pipe-split policy for debug overrides. */
+enum pipe_split_policy {
+	MPC_SPLIT_DYNAMIC = 0,
+	MPC_SPLIT_AVOID = 1,
+	MPC_SPLIT_AVOID_MULT_DISP = 2,
+};
+
+enum wm_report_mode {
+	WM_REPORT_DEFAULT = 0,
+	WM_REPORT_OVERRIDE = 1,
+};
+
+/* Debug knobs: tracing toggles, power-gating/clock-gating disables, and
+ * watermark/latency overrides (the *_ns fields override calculated values
+ * when non-zero — TODO confirm the zero-means-default convention). */
+struct dc_debug {
+	bool surface_visual_confirm;
+	bool sanity_checks;
+	bool max_disp_clk;
+	bool surface_trace;
+	bool timing_trace;
+	bool clock_trace;
+	bool validation_trace;
+
+	/* stutter efficiency related */
+	bool disable_stutter;
+	bool use_max_lb;
+	enum dcc_option disable_dcc;
+	enum pipe_split_policy pipe_split_policy;
+	bool force_single_disp_pipe_split;
+	bool voltage_align_fclk;
+
+	bool disable_dfs_bypass;
+	bool disable_dpp_power_gate;
+	bool disable_hubp_power_gate;
+	bool disable_pplib_wm_range;
+	enum wm_report_mode pplib_wm_report_mode;
+	unsigned int min_disp_clk_khz;
+	int sr_exit_time_dpm0_ns;
+	int sr_enter_plus_exit_time_dpm0_ns;
+	int sr_exit_time_ns;
+	int sr_enter_plus_exit_time_ns;
+	int urgent_latency_ns;
+	int percent_of_ideal_drambw;
+	int dram_clock_change_latency_ns;
+	int always_scale;
+	bool disable_pplib_clock_request;
+	bool disable_clock_gate;
+	bool disable_dmcu;
+	bool disable_psr;
+	bool force_abm_enable;
+	bool disable_hbup_pg;
+	bool disable_dpp_pg;
+	bool disable_stereo_support;
+	bool vsr_support;
+	bool performance_trace;
+};
+struct dc_state;
+struct resource_pool;
+struct dce_hwseq;
+
+/* Top-level Display Core instance: capabilities, function tables, links,
+ * the currently committed state, and bandwidth-calculation inputs. */
+struct dc {
+	struct dc_caps caps;
+	struct dc_cap_funcs cap_funcs;
+	struct dc_stream_state_funcs stream_funcs;
+	struct dc_link_funcs link_funcs;
+	struct dc_config config;
+	struct dc_debug debug;
+
+	struct dc_context *ctx;
+
+	uint8_t link_count;
+	struct dc_link *links[MAX_PIPES * 2];
+
+	struct dc_state *current_state;
+	struct resource_pool *res_pool;
+
+	/* Display Engine Clock levels */
+	struct dm_pp_clock_levels sclk_lvls;
+
+	/* Inputs into BW and WM calculations. */
+	struct bw_calcs_dceip *bw_dceip;
+	struct bw_calcs_vbios *bw_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+	struct dcn_soc_bounding_box *dcn_soc;
+	struct dcn_ip_params *dcn_ip;
+	struct display_mode_lib dml;
+#endif
+
+	/* HW functions */
+	struct hw_sequencer_funcs hwss;
+	struct dce_hwseq *hwseq;
+
+	/* temp store of dm_pp_display_configuration
+	 * to compare to see if display config changed
+	 */
+	struct dm_pp_display_configuration prev_display_config;
+
+	/* FBC compressor */
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	struct compressor *fbc_compressor;
+#endif
+};
+
+enum frame_buffer_mode {
+	FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
+	FRAME_BUFFER_MODE_ZFB_ONLY,
+	FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
+} ;
+
+/* DCHUB setup parameters (ZFB = zero frame buffer / system-memory FB).
+ * NOTE(review): "dchub_initialzied" typo kept — field is part of the
+ * interface. */
+struct dchub_init_data {
+	int64_t zfb_phys_addr_base;
+	int64_t zfb_mc_base_addr;
+	uint64_t zfb_size_in_byte;
+	enum frame_buffer_mode fb_mode;
+	bool dchub_initialzied;
+	bool dchub_info_valid;
+};
+
+/* Everything the DM must supply to dc_create(). */
+struct dc_init_data {
+	struct hw_asic_id asic_id;
+	void *driver; /* ctx */
+	struct cgs_device *cgs_device;
+
+	int num_virtual_links;
+	/*
+	 * If 'vbios_override' not NULL, it will be called instead
+	 * of the real VBIOS. Intended use is Diagnostics on FPGA.
+	 */
+	struct dc_bios *vbios_override;
+	enum dce_environment dce_environment;
+
+	struct dc_config flags;
+	uint32_t log_mask;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	uint64_t fbc_gpu_addr;
+#endif
+};
+
+/* Create/destroy the DC instance; dc_destroy NULLs the caller's pointer. */
+struct dc *dc_create(const struct dc_init_data *init_params);
+
+void dc_destroy(struct dc **dc);
+
+/*******************************************************************************
+ * Surface Interfaces
+ ******************************************************************************/
+
+enum {
+	/* 1024 segments + endpoint for a distributed-points transfer func */
+	TRANSFER_FUNC_POINTS = 1025
+};
+
+// Moved here from color module for linux
+enum color_transfer_func {
+	transfer_func_unknown,
+	transfer_func_srgb,
+	transfer_func_bt709,
+	transfer_func_pq2084,
+	transfer_func_pq2084_interim,
+	transfer_func_linear_0_1,
+	transfer_func_linear_0_125,
+	transfer_func_dolbyvision,
+	transfer_func_gamma_22,
+	transfer_func_gamma_26
+};
+
+enum color_color_space {
+	color_space_unsupported,
+	color_space_srgb,
+	color_space_bt601,
+	color_space_bt709,
+	color_space_xv_ycc_bt601,
+	color_space_xv_ycc_bt709,
+	color_space_xr_rgb,
+	color_space_bt2020,
+	color_space_adobe,
+	color_space_dci_p3,
+	color_space_sc_rgb_ms_ref,
+	color_space_display_native,
+	color_space_app_ctrl,
+	color_space_dolby_vision,
+	color_space_custom_coordinates
+};
+
+struct dc_hdr_static_metadata {
+	/* display chromaticities and white point in units of 0.00001 */
+	unsigned int chromaticity_green_x;
+	unsigned int chromaticity_green_y;
+	unsigned int chromaticity_blue_x;
+	unsigned int chromaticity_blue_y;
+	unsigned int chromaticity_red_x;
+	unsigned int chromaticity_red_y;
+	unsigned int chromaticity_white_point_x;
+	unsigned int chromaticity_white_point_y;
+
+	uint32_t min_luminance;
+	uint32_t max_luminance;
+	uint32_t maximum_content_light_level;
+	uint32_t maximum_frame_average_light_level;
+
+	bool hdr_supported;
+	bool is_hdr;
+};
+
+enum dc_transfer_func_type {
+	TF_TYPE_PREDEFINED,
+	TF_TYPE_DISTRIBUTED_POINTS,
+	TF_TYPE_BYPASS
+};
+
+struct dc_transfer_func_distributed_points {
+	struct fixed31_32 red[TRANSFER_FUNC_POINTS];
+	struct fixed31_32 green[TRANSFER_FUNC_POINTS];
+	struct fixed31_32 blue[TRANSFER_FUNC_POINTS];
+
+	uint16_t end_exponent;
+	uint16_t x_point_at_y1_red;
+	uint16_t x_point_at_y1_green;
+	uint16_t x_point_at_y1_blue;
+};
+
+enum dc_transfer_func_predefined {
+	TRANSFER_FUNCTION_SRGB,
+	TRANSFER_FUNCTION_BT709,
+	TRANSFER_FUNCTION_PQ,
+	TRANSFER_FUNCTION_LINEAR,
+};
+
+/* Refcounted; manage via dc_transfer_func_retain/release. */
+struct dc_transfer_func {
+	struct kref refcount;
+	struct dc_transfer_func_distributed_points tf_pts;
+	enum dc_transfer_func_type type;
+	enum dc_transfer_func_predefined tf;
+	struct dc_context *ctx;
+};
+
+/*
+ * This structure is filled in by dc_plane_get_status and contains
+ * the last requested address and the currently active address so the caller
+ * can determine if there are any outstanding flips
+ */
+struct dc_plane_status {
+	struct dc_plane_address requested_address;
+	struct dc_plane_address current_address;
+	bool is_flip_pending;
+	bool is_right_eye;
+};
+
+/* Full state of one plane (surface): addressing, scaling rects, color
+ * pipeline inputs, and DC-private bookkeeping. Refcounted; manage via
+ * dc_plane_state_retain/release. */
+struct dc_plane_state {
+	struct dc_plane_address address;
+	struct scaling_taps scaling_quality;
+	struct rect src_rect;
+	struct rect dst_rect;
+	struct rect clip_rect;
+
+	union plane_size plane_size;
+	union dc_tiling_info tiling_info;
+
+	struct dc_plane_dcc_param dcc;
+	struct dc_hdr_static_metadata hdr_static_ctx;
+
+	struct dc_gamma *gamma_correction;
+	struct dc_transfer_func *in_transfer_func;
+
+	// sourceContentAttribute cache
+	bool is_source_input_valid;
+	struct dc_hdr_static_metadata source_input_mastering_info;
+	enum color_color_space source_input_color_space;
+	enum color_transfer_func source_input_tf;
+
+	enum dc_color_space color_space;
+	enum surface_pixel_format format;
+	enum dc_rotation_angle rotation;
+	enum plane_stereo_format stereo_format;
+
+	bool per_pixel_alpha;
+	bool visible;
+	bool flip_immediate;
+	bool horizontal_mirror;
+
+	/* private to DC core */
+	struct dc_plane_status status;
+	struct dc_context *ctx;
+
+	/* private to dc_surface.c */
+	enum dc_irq_source irq_source;
+	struct kref refcount;
+};
+
+/* Subset of plane state used when describing a surface update. */
+struct dc_plane_info {
+	union plane_size plane_size;
+	union dc_tiling_info tiling_info;
+	struct dc_plane_dcc_param dcc;
+	enum surface_pixel_format format;
+	enum dc_rotation_angle rotation;
+	enum plane_stereo_format stereo_format;
+	enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
+	bool horizontal_mirror;
+	bool visible;
+	bool per_pixel_alpha;
+};
+
+struct dc_scaling_info {
+	struct rect src_rect;
+	struct rect dst_rect;
+	struct rect clip_rect;
+	struct scaling_taps scaling_quality;
+};
+
+struct dc_surface_update {
+	struct dc_plane_state *surface;
+
+	/* isr safe update parameters.  null means no updates */
+	struct dc_flip_addrs *flip_addr;
+	struct dc_plane_info *plane_info;
+	struct dc_scaling_info *scaling_info;
+	/* following updates require alloc/sleep/spin that is not isr safe,
+	 * null means no updates
+	 */
+	/* gamma TO BE REMOVED */
+	struct dc_gamma *gamma;
+	struct dc_transfer_func *in_transfer_func;
+	struct dc_hdr_static_metadata *hdr_static_metadata;
+};
+
+/*
+ * Create a new surface with default parameters;
+ */
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+		const struct dc_plane_state *plane_state);
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+void dc_gamma_retain(struct dc_gamma *dc_gamma);
+void dc_gamma_release(struct dc_gamma **dc_gamma);
+struct dc_gamma *dc_create_gamma(void);
+
+void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
+void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
+struct dc_transfer_func *dc_create_transfer_func(void);
+
+/*
+ * This structure holds a surface address.  There could be multiple addresses
+ * in cases such as Stereo 3D, Planar YUV, etc.  Other per-flip attributes such
+ * as frame durations and DCC format can also be set.
+ */
+struct dc_flip_addrs {
+	struct dc_plane_address address;
+	bool flip_immediate;
+	/* TODO: add flip duration for FreeSync */
+};
+
+bool dc_post_update_surfaces_to_stream(
+		struct dc *dc);
+
+/* Surface update type is used by dc_update_surfaces_and_stream
+ * The update type is determined at the very beginning of the function based
+ * on parameters passed in and decides how much programming (or updating) is
+ * going to be done during the call.
+ *
+ * UPDATE_TYPE_FAST is used for really fast updates that do not require much
+ * logical calculations or hardware register programming. This update MUST be
+ * ISR safe on windows. Currently fast update will only be used to flip surface
+ * address.
+ *
+ * UPDATE_TYPE_MED is used for slower updates which require significant hw
+ * re-programming however do not affect bandwidth consumption or clock
+ * requirements. At present, this is the level at which front end updates
+ * that do not require us to run bw_calcs happen. These are in/out transfer func
+ * updates, viewport offset changes, recout size changes and pixel depth changes.
+ * This update can be done at ISR, but we want to minimize how often this happens.
+ *
+ * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
+ * bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
+ * end related. Any time viewport dimensions, recout dimensions, scaling ratios or
+ * gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
+ * a full update. This cannot be done at ISR level and should be a rare event.
+ * Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
+ * underscan we don't expect to see this call at all.
+ */
+
+/* See the comment block above for the full semantics of each level. */
+enum surface_update_type {
+	UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
+	UPDATE_TYPE_MED,  /* ISR safe, most of programming needed, no bw/clk change*/
+	UPDATE_TYPE_FULL, /* may need to shuffle resources */
+};
+
+/*******************************************************************************
+ * Stream Interfaces
+ ******************************************************************************/
+
+/* Read-only status of a committed stream (see dc_stream_get_status()). */
+struct dc_stream_status {
+	int primary_otg_inst;
+	int stream_enc_inst;
+	/* number of valid entries in plane_states */
+	int plane_count;
+	struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+
+	/*
+	 * link this stream passes through
+	 */
+	struct dc_link *link;
+};
+
+/*
+ * Full state of one display stream: the sink it drives, timing,
+ * composition rects, and output color/encoder settings.  Reference
+ * counted via 'refcount' (dc_stream_retain/dc_stream_release).
+ */
+struct dc_stream_state {
+	struct dc_sink *sink;
+	struct dc_crtc_timing timing;
+
+	struct rect src; /* composition area */
+	struct rect dst; /* stream addressable area */
+
+	struct audio_info audio_info;
+
+	struct freesync_context freesync_ctx;
+
+	/* stream-level (output) color pipeline */
+	struct dc_transfer_func *out_transfer_func;
+	struct colorspace_transform gamut_remap_matrix;
+	struct csc_transform csc_color_matrix;
+
+	enum signal_type output_signal;
+
+	enum dc_color_space output_color_space;
+	enum dc_dither_option dither_option;
+
+	enum view_3d_format view_format;
+
+	bool ignore_msa_timing_param;
+	/* TODO: custom INFO packets */
+	/* TODO: ABM info (DMCU) */
+	/* TODO: PSR info */
+	/* TODO: CEA VIC */
+
+	/* from core_stream struct */
+	struct dc_context *ctx;
+
+	/* used by DCP and FMT */
+	struct bit_depth_reduction_params bit_depth_params;
+	struct clamping_and_pixel_encoding_params clamping;
+
+	int phy_pix_clk;
+	enum signal_type signal;
+	bool dpms_off;
+
+	struct dc_stream_status status;
+
+	struct dc_cursor_attributes cursor_attributes;
+
+	/* from stream struct */
+	struct kref refcount;
+};
+
+/* Stream-level attributes that may be changed after commit; passed to
+ * dc_commit_updates_for_stream / dc_check_update_surfaces_for_stream.
+ */
+struct dc_stream_update {
+	struct rect src;
+	struct rect dst;
+	struct dc_transfer_func *out_transfer_func;
+};
+
+bool dc_is_stream_unchanged(
+	struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+bool dc_is_stream_scaling_unchanged(
+	struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+
+/*
+ * Set up surface attributes and associate to a stream
+ * The surfaces parameter is an absolute set of all surfaces active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory read.
+ * Any flip related attribute changes must be done through this interface.
+ *
+ * After this call:
+ *   Surfaces attributes are programmed and configured to be composed into stream.
+ *   This does not trigger a flip.  No surface address is programmed.
+ */
+
+bool dc_commit_planes_to_stream(
+		struct dc *dc,
+		struct dc_plane_state **plane_states,
+		uint8_t new_plane_count,
+		struct dc_stream_state *dc_stream,
+		struct dc_state *state);
+
+void dc_commit_updates_for_stream(struct dc *dc,
+		struct dc_surface_update *srf_updates,
+		int surface_count,
+		struct dc_stream_state *stream,
+		struct dc_stream_update *stream_update,
+		struct dc_plane_state **plane_states,
+		struct dc_state *state);
+/*
+ * Log the current stream state.
+ */
+void dc_stream_log(
+	const struct dc_stream_state *stream,
+	struct dal_logger *dc_logger,
+	enum dc_log_type log_type);
+
+uint8_t dc_get_current_stream_count(struct dc *dc);
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
+
+/*
+ * Return the current frame counter.
+ */
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+
+/* TODO: Return parsed values rather than direct register read
+ * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
+ * being refactored properly to be dce-specific
+ */
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+				  uint32_t *v_blank_start,
+				  uint32_t *v_blank_end,
+				  uint32_t *h_position,
+				  uint32_t *v_position);
+
+enum dc_status dc_add_stream_to_ctx(
+			struct dc *dc,
+		struct dc_state *new_ctx,
+		struct dc_stream_state *stream);
+
+enum dc_status dc_remove_stream_from_ctx(
+		struct dc *dc,
+			struct dc_state *new_ctx,
+			struct dc_stream_state *stream);
+
+
+bool dc_add_plane_to_context(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *context);
+
+bool dc_remove_plane_from_context(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *context);
+
+bool dc_rem_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_state *context);
+
+bool dc_add_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state * const *plane_states,
+		int plane_count,
+		struct dc_state *context);
+
+/*
+ * Structure to store surface/stream associations for validation
+ */
+struct dc_validation_set {
+	struct dc_stream_state *stream;
+	struct dc_plane_state *plane_states[MAX_SURFACES];
+	/* number of valid entries in plane_states */
+	uint8_t plane_count;
+};
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
+
+enum dc_status dc_validate_global_state(
+		struct dc *dc,
+		struct dc_state *new_ctx);
+
+/*
+ * This function takes a stream and checks if it is guaranteed to be supported.
+ * Guaranteed means that MAX_COFUNC similar streams are supported.
+ *
+ * After this call:
+ *   No hardware is programmed for call.  Only validation is done.
+ */
+
+
+void dc_resource_state_construct(
+		const struct dc *dc,
+		struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct(
+		const struct dc_state *src_ctx,
+		struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct_current(
+		const struct dc *dc,
+		struct dc_state *dst_ctx);
+
+void dc_resource_state_destruct(struct dc_state *context);
+
+/*
+ * TODO update to make it about validation sets
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ *   Phy, Encoder, Timing Generator are programmed and enabled.
+ *   New streams are enabled with blank stream; no memory read.
+ */
+bool dc_commit_state(struct dc *dc, struct dc_state *context);
+
+/*
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ *   Phy, Encoder, Timing Generator are programmed and enabled.
+ *   New streams are enabled with blank stream; no memory read.
+ */
+/*
+ * Enable stereo when commit_streams is not required,
+ * for example, frame alternate.
+ */
+bool dc_enable_stereo(
+	struct dc *dc,
+	struct dc_state *context,
+	struct dc_stream_state *streams[],
+	uint8_t stream_count);
+
+/**
+ * Create a new default stream for the requested sink
+ */
+struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+
+void dc_stream_retain(struct dc_stream_state *dc_stream);
+void dc_stream_release(struct dc_stream_state *dc_stream);
+
+struct dc_stream_status *dc_stream_get_status(
+	struct dc_stream_state *dc_stream);
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+		struct dc *dc,
+		struct dc_surface_update *updates,
+		int surface_count,
+		struct dc_stream_update *stream_update,
+		const struct dc_stream_status *stream_status);
+
+
+struct dc_state *dc_create_state(void);
+void dc_retain_state(struct dc_state *context);
+void dc_release_state(struct dc_state *context);
+
+/*******************************************************************************
+ * Link Interfaces
+ ******************************************************************************/
+
+/* Sink capabilities read from the DPCD (DisplayPort Configuration Data). */
+struct dpcd_caps {
+	union dpcd_rev dpcd_rev;
+	union max_lane_count max_ln_count;
+	union max_down_spread max_down_spread;
+
+	/* dongle type (DP converter, CV smart dongle) */
+	enum display_dongle_type dongle_type;
+	/* Dongle's downstream count. */
+	union sink_count sink_count;
+	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+	 * indicates 'Frame Sequential-to-Frame Pack' conversion capability.
+	 */
+	struct dc_dongle_caps dongle_caps;
+
+	uint32_t sink_dev_id;
+	uint32_t branch_dev_id;
+	int8_t branch_dev_name[6];
+	int8_t branch_hw_revision;
+
+	bool allow_invalid_MSA_timing_param;
+	bool panel_mode_edp;
+	bool dpcd_display_control_capable;
+};
+
+/* Queried via dc_link_get_status(). */
+struct dc_link_status {
+	struct dpcd_caps *dpcd_caps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+	/* DIG front end (stream encoder) feeding this allocation */
+	const struct stream_encoder *stream_enc;
+	/* associate DRM payload table with DC stream encoder */
+	uint8_t vcp_id;
+	/* number of slots required for the DP stream in transport packet */
+	uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+	/* number of DP video streams (valid entries in stream_allocations) */
+	int stream_count;
+	/* array of stream allocations */
+	struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+	struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+	unsigned int sink_count;
+	struct dc_sink *local_sink;
+	unsigned int link_index;
+	enum dc_connection_type type;
+	enum signal_type connector_signal;
+	enum dc_irq_source irq_source_hpd;
+	enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse  */
+	/* caps is the same as reported_link_cap. Link training uses
+	 * reported_link_cap. Will clean up.  TODO
+	 */
+	struct dc_link_settings reported_link_cap;
+	struct dc_link_settings verified_link_cap;
+	struct dc_link_settings cur_link_settings;
+	struct dc_lane_settings cur_lane_setting;
+	struct dc_link_settings preferred_link_setting;
+
+	/* hardware instance indices for the DDC line, HPD source and
+	 * link encoder backing this connector
+	 */
+	uint8_t ddc_hw_inst;
+
+	uint8_t hpd_src;
+
+	uint8_t link_enc_hw_inst;
+
+	bool test_pattern_enabled;
+	union compliance_test_state compliance_test_state;
+
+	void *priv;
+
+	struct ddc_service *ddc;
+
+	bool aux_mode;
+
+	/* Private to DC core */
+
+	const struct dc *dc;
+
+	struct dc_context *ctx;
+
+	struct link_encoder *link_enc;
+	struct graphics_object_id link_id;
+	union ddi_channel_mapping ddi_channel_mapping;
+	struct connector_device_tag_info device_tag;
+	struct dpcd_caps dpcd_caps;
+	unsigned short chip_caps;
+	unsigned int dpcd_sink_count;
+	enum edp_revision edp_revision;
+	bool psr_enabled;
+
+	/* MST record stream using this link */
+	struct link_flags {
+		bool dp_keep_receiver_powered;
+	} wa_flags;
+	struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+	struct dc_link_status link_status;
+
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link.  dc_link order is constant and determined at
+ * boot time.  They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+	/* No bounds checking: the caller must pass a valid index
+	 * (use dc_get_caps() for the link count, per the comment above).
+	 */
+	return dc->links[link_index];
+}
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+		uint32_t frame_ramp, const struct dc_stream_state *stream);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+		const struct dc_stream_state *stream, struct psr_config *psr_config,
+		struct psr_context *psr_context);
+
+/* Request DC to detect if there is a Panel connected.
+ * boot - If this call is during initial boot.
+ * Return false for any type of detection failure or MST detection
+ * true otherwise. True meaning further action is required (status update
+ * and OS notification).
+ */
+enum dc_detect_reason {
+	DETECT_REASON_BOOT,	/* initial boot-time detection */
+	DETECT_REASON_HPD,	/* hot-plug detect interrupt */
+	DETECT_REASON_HPDRX,	/* HPD RX (DP short pulse) interrupt */
+};
+
+bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+		union hpd_irq_data *hpd_irq_dpcd_data);
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+		struct dc_link *dc_link,
+		const uint8_t *edid,
+		int len,
+		struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+	struct dc_link *link,
+	struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+
+void dc_link_dp_set_drive_settings(
+	struct dc_link *link,
+	struct link_training_settings *lt_settings);
+
+enum link_training_result dc_link_dp_perform_link_training(
+	struct dc_link *link,
+	const struct dc_link_settings *link_setting,
+	bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+	struct dc_link *link,
+	enum dp_test_pattern test_pattern,
+	const struct link_training_settings *p_link_settings,
+	const unsigned char *p_custom_pattern,
+	unsigned int cust_pattern_size);
+
+/*******************************************************************************
+ * Sink Interfaces - A sink corresponds to a display output device
+ ******************************************************************************/
+
+/* Display container ID; fields map onto ELD (EDID-Like Data) entries. */
+struct dc_container_id {
+	// 128bit GUID in binary form
+	unsigned char  guid[16];
+	// 8 byte port ID -> ELD.PortID
+	unsigned int   portId[2];
+	// 2 byte manufacturer name -> ELD.ManufacturerName
+	unsigned short manufacturerName;
+	// 2 byte product code -> ELD.ProductCode
+	unsigned short productCode;
+};
+
+
+
+/*
+ * The sink structure contains EDID and other display device properties
+ */
+struct dc_sink {
+	enum signal_type sink_signal;
+	struct dc_edid dc_edid; /* raw edid */
+	struct dc_edid_caps edid_caps; /* parse display caps */
+	struct dc_container_id *dc_container_id;
+	/* max pixel clock an in-path dongle supports -- units not stated
+	 * here; TODO confirm against the detection code
+	 */
+	uint32_t dongle_max_pix_clk;
+	void *priv;
+	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
+	bool converter_disable_audio;
+
+	/* private to DC core */
+	struct dc_link *link;
+	struct dc_context *ctx;
+
+	/* private to dc_sink.c */
+	struct kref refcount;
+};
+
+void dc_sink_retain(struct dc_sink *sink);
+void dc_sink_release(struct dc_sink *sink);
+
+/* Creation parameters for dc_sink_create(). */
+struct dc_sink_init_data {
+	enum signal_type sink_signal;
+	struct dc_link *link;
+	uint32_t dongle_max_pix_clk;
+	bool converter_disable_audio;
+};
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
+
+/*******************************************************************************
+ * Cursor interfaces - To manage the cursor within a stream
+ ******************************************************************************/
+/* TODO: Deprecated once we switch to dc_set_cursor_position */
+bool dc_stream_set_cursor_attributes(
+	struct dc_stream_state *stream,
+	const struct dc_cursor_attributes *attributes);
+
+bool dc_stream_set_cursor_position(
+	struct dc_stream_state *stream,
+	const struct dc_cursor_position *position);
+
+/* Newer interfaces  */
+/* Newer cursor interface: surface address plus attributes in one struct. */
+struct dc_cursor {
+	struct dc_plane_address address;
+	struct dc_cursor_attributes attributes;
+};
+
+/*******************************************************************************
+ * Interrupt interfaces
+ ******************************************************************************/
+enum dc_irq_source dc_interrupt_to_irq_source(
+		struct dc *dc,
+		uint32_t src_id,
+		uint32_t ext_id);
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
+enum dc_irq_source dc_get_hpd_irq_source_at_index(
+		struct dc *dc, uint32_t link_index);
+
+/*******************************************************************************
+ * Power Interfaces
+ ******************************************************************************/
+
+void dc_set_power_state(
+		struct dc *dc,
+		enum dc_acpi_cm_power_state power_state);
+void dc_resume(struct dc *dc);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_submit_i2c(
+		struct dc *dc,
+		uint32_t link_index,
+		struct i2c_command *cmd);
+
+
+#endif /* DC_INTERFACE_H_ */

+ 218 - 0
drivers/gpu/drm/amd/display/dc/dc_bios_types.h

@@ -0,0 +1,218 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_BIOS_TYPES_H
+#define DC_BIOS_TYPES_H
+
+/******************************************************************************
+ * Interface file for VBIOS implementations.
+ *
+ * The default implementation is inside DC.
+ * Display Manager (which instantiates DC) has the option to supply its own
+ * (external to DC) implementation of VBIOS, which will be called by DC, using
+ * this interface.
+ * (The intended use is Diagnostics, but other uses may appear.)
+ *****************************************************************************/
+
+#include "include/bios_parser_types.h"
+
+/*
+ * Call table for VBIOS services.  The default implementation is DC's own
+ * bios parser; Display Manager may supply an external one (see the file
+ * header comment above).
+ */
+struct dc_vbios_funcs {
+	/* QUERIES */
+
+	uint8_t (*get_connectors_number)(struct dc_bios *bios);
+
+	struct graphics_object_id (*get_encoder_id)(
+		struct dc_bios *bios,
+		uint32_t i);
+	struct graphics_object_id (*get_connector_id)(
+		struct dc_bios *bios,
+		uint8_t connector_index);
+	uint32_t (*get_dst_number)(
+		struct dc_bios *bios,
+		struct graphics_object_id id);
+
+	enum bp_result (*get_src_obj)(
+		struct dc_bios *bios,
+		struct graphics_object_id object_id, uint32_t index,
+		struct graphics_object_id *src_object_id);
+	enum bp_result (*get_dst_obj)(
+		struct dc_bios *bios,
+		struct graphics_object_id object_id, uint32_t index,
+		struct graphics_object_id *dest_object_id);
+
+	enum bp_result (*get_i2c_info)(
+		struct dc_bios *dcb,
+		struct graphics_object_id id,
+		struct graphics_object_i2c_info *info);
+
+	enum bp_result (*get_voltage_ddc_info)(
+		struct dc_bios *bios,
+		uint32_t index,
+		struct graphics_object_i2c_info *info);
+	enum bp_result (*get_thermal_ddc_info)(
+		struct dc_bios *bios,
+		uint32_t i2c_channel_id,
+		struct graphics_object_i2c_info *info);
+	enum bp_result (*get_hpd_info)(
+		struct dc_bios *bios,
+		struct graphics_object_id id,
+		struct graphics_object_hpd_info *info);
+	enum bp_result (*get_device_tag)(
+		struct dc_bios *bios,
+		struct graphics_object_id connector_object_id,
+		uint32_t device_tag_index,
+		struct connector_device_tag_info *info);
+	enum bp_result (*get_firmware_info)(
+		struct dc_bios *bios,
+		struct dc_firmware_info *info);
+	enum bp_result (*get_spread_spectrum_info)(
+		struct dc_bios *bios,
+		enum as_signal_type signal,
+		uint32_t index,
+		struct spread_spectrum_info *ss_info);
+	uint32_t (*get_ss_entry_number)(
+		struct dc_bios *bios,
+		enum as_signal_type signal);
+	enum bp_result (*get_embedded_panel_info)(
+		struct dc_bios *bios,
+		struct embedded_panel_info *info);
+	enum bp_result (*get_gpio_pin_info)(
+		struct dc_bios *bios,
+		uint32_t gpio_id,
+		struct gpio_pin_info *info);
+	enum bp_result (*get_encoder_cap_info)(
+		struct dc_bios *bios,
+		struct graphics_object_id object_id,
+		struct bp_encoder_cap_info *info);
+
+	bool (*is_lid_status_changed)(
+		struct dc_bios *bios);
+	bool (*is_display_config_changed)(
+		struct dc_bios *bios);
+	bool (*is_accelerated_mode)(
+		struct dc_bios *bios);
+	void (*get_bios_event_info)(
+		struct dc_bios *bios,
+		struct bios_event_info *info);
+	void (*update_requested_backlight_level)(
+		struct dc_bios *bios,
+		uint32_t backlight_8bit);
+	uint32_t (*get_requested_backlight_level)(
+		struct dc_bios *bios);
+	void (*take_backlight_control)(
+		struct dc_bios *bios,
+		bool cntl);
+
+	bool (*is_active_display)(
+		struct dc_bios *bios,
+		enum signal_type signal,
+		const struct connector_device_tag_info *device_tag);
+	enum controller_id (*get_embedded_display_controller_id)(
+		struct dc_bios *bios);
+	uint32_t (*get_embedded_display_refresh_rate)(
+		struct dc_bios *bios);
+
+	void (*set_scratch_critical_state)(
+		struct dc_bios *bios,
+		bool state);
+	bool (*is_device_id_supported)(
+		struct dc_bios *bios,
+		struct device_id id);
+
+	/* COMMANDS */
+
+	enum bp_result (*encoder_control)(
+		struct dc_bios *bios,
+		struct bp_encoder_control *cntl);
+	enum bp_result (*transmitter_control)(
+		struct dc_bios *bios,
+		struct bp_transmitter_control *cntl);
+	enum bp_result (*crt_control)(
+		struct dc_bios *bios,
+		enum engine_id engine_id,
+		bool enable,
+		uint32_t pixel_clock);
+	enum bp_result (*enable_crtc)(
+		struct dc_bios *bios,
+		enum controller_id id,
+		bool enable);
+	enum bp_result (*adjust_pixel_clock)(
+		struct dc_bios *bios,
+		struct bp_adjust_pixel_clock_parameters *bp_params);
+	enum bp_result (*set_pixel_clock)(
+		struct dc_bios *bios,
+		struct bp_pixel_clock_parameters *bp_params);
+	enum bp_result (*set_dce_clock)(
+		struct dc_bios *bios,
+		struct bp_set_dce_clock_parameters *bp_params);
+	unsigned int (*get_smu_clock_info)(
+		struct dc_bios *bios);
+	enum bp_result (*enable_spread_spectrum_on_ppll)(
+		struct dc_bios *bios,
+		struct bp_spread_spectrum_parameters *bp_params,
+		bool enable);
+	enum bp_result (*program_crtc_timing)(
+		struct dc_bios *bios,
+		struct bp_hw_crtc_timing_parameters *bp_params);
+
+	enum bp_result (*crtc_source_select)(
+		struct dc_bios *bios,
+		struct bp_crtc_source_select *bp_params);
+	enum bp_result (*program_display_engine_pll)(
+		struct dc_bios *bios,
+		struct bp_pixel_clock_parameters *bp_params);
+
+	enum signal_type (*dac_load_detect)(
+		struct dc_bios *bios,
+		struct graphics_object_id encoder,
+		struct graphics_object_id connector,
+		enum signal_type display_signal);
+
+	enum bp_result (*enable_disp_power_gating)(
+		struct dc_bios *bios,
+		enum controller_id controller_id,
+		enum bp_pipe_control_action action);
+
+	void (*post_init)(struct dc_bios *bios);
+
+	void (*bios_parser_destroy)(struct dc_bios **dcb);
+};
+
+/* VBIOS scratch register offsets used by the parser. */
+struct bios_registers {
+	uint32_t BIOS_SCRATCH_6;
+};
+
+/* A VBIOS instance: the function table plus the raw BIOS image it parses. */
+struct dc_bios {
+	const struct dc_vbios_funcs *funcs;
+
+	/* raw BIOS image and its size */
+	uint8_t *bios;
+	uint32_t bios_size;
+
+	uint8_t *bios_local_image;
+
+	struct dc_context *ctx;
+	const struct bios_registers *regs;
+	struct integrated_info *integrated_info;
+};
+
+#endif /* DC_BIOS_TYPES_H */

+ 115 - 0
drivers/gpu/drm/amd/display/dc/dc_ddc_types.h

@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_DDC_TYPES_H_
+#define DC_DDC_TYPES_H_
+
+/* One read or write transfer within an i2c_command. */
+struct i2c_payload {
+	bool write;	/* true = write transaction, false = read */
+	uint8_t address;
+	uint32_t length; /* number of bytes in 'data' */
+	uint8_t *data;
+};
+
+/* Which engine executes an I2C command. */
+enum i2c_command_engine {
+	I2C_COMMAND_ENGINE_DEFAULT,
+	I2C_COMMAND_ENGINE_SW,	/* software engine */
+	I2C_COMMAND_ENGINE_HW	/* hardware engine */
+};
+
+/* A batch of I2C payloads submitted as one command (see dc_submit_i2c). */
+struct i2c_command {
+	struct i2c_payload *payloads;
+	uint8_t number_of_payloads;
+
+	enum i2c_command_engine engine;
+
+	/* expressed in KHz
+	 * zero means "use default value" */
+	uint32_t speed;
+};
+
+/* Hardware capability info for a DDC GPIO pin pair. */
+struct gpio_ddc_hw_info {
+	bool hw_supported;
+	uint32_t ddc_channel;
+};
+
+/* A DDC bus: the data/clock GPIO pair plus its hardware info. */
+struct ddc {
+	struct gpio *pin_data;
+	struct gpio *pin_clock;
+	struct gpio_ddc_hw_info hw_info;
+	struct dc_context *ctx;
+};
+
+/* Per-DDC workaround flags. */
+union ddc_wa {
+	struct {
+		uint32_t DP_SKIP_POWER_OFF:1;
+		uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
+	} bits;
+	uint32_t raw;
+};
+
+/* Bookkeeping flags for a DDC service. */
+struct ddc_flags {
+	uint8_t EDID_QUERY_DONE_ONCE:1;
+	uint8_t IS_INTERNAL_DISPLAY:1;
+	uint8_t FORCE_READ_REPEATED_START:1;
+	uint8_t EDID_STRESS_READ:1;
+
+};
+
+/* How DDC traffic reaches the sink: native I2C, or I2C tunneled over
+ * the DP AUX channel (with optional defer handling).
+ */
+enum ddc_transaction_type {
+	DDC_TRANSACTION_TYPE_NONE = 0,
+	DDC_TRANSACTION_TYPE_I2C,
+	DDC_TRANSACTION_TYPE_I2C_OVER_AUX,
+	DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER,
+	DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER
+};
+
+/* Kind of dongle/converter detected on the link, if any. */
+enum display_dongle_type {
+	DISPLAY_DONGLE_NONE = 0,
+	/* Active converter types*/
+	DISPLAY_DONGLE_DP_VGA_CONVERTER,
+	DISPLAY_DONGLE_DP_DVI_CONVERTER,
+	DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+	/* DP-HDMI/DVI passive dongles (Type 1 and Type 2)*/
+	DISPLAY_DONGLE_DP_DVI_DONGLE,
+	DISPLAY_DONGLE_DP_HDMI_DONGLE,
+	/* Other types of dongle*/
+	DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
+};
+
+/* Higher-level DDC service bound to a link: the pin, transaction type,
+ * workaround flags and a cached EDID read from the sink.
+ */
+struct ddc_service {
+	struct ddc *ddc_pin;
+	struct ddc_flags flags;
+	union ddc_wa wa;
+	enum ddc_transaction_type transaction_type;
+	enum display_dongle_type dongle_type;
+	struct dc_context *ctx;
+	struct dc_link *link;
+
+	uint32_t address;
+	/* number of valid bytes in edid_buf */
+	uint32_t edid_buf_len;
+	uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+};
+
+#endif /* DC_DDC_TYPES_H_ */

+ 467 - 0
drivers/gpu/drm/amd/display/dc/dc_dp_types.h

@@ -0,0 +1,467 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_DP_TYPES_H
+#define DC_DP_TYPES_H
+
+/* Number of main-link lanes used for the DP link. */
+enum dc_lane_count {
+	LANE_COUNT_UNKNOWN = 0,
+	LANE_COUNT_ONE = 1,
+	LANE_COUNT_TWO = 2,
+	LANE_COUNT_FOUR = 4,
+	LANE_COUNT_EIGHT = 8,
+	LANE_COUNT_DP_MAX = LANE_COUNT_FOUR
+};
+
+/* This is actually a reference clock (27MHz) multiplier
+ * 162MBps bandwidth for 1.62GHz like rate,
+ * 270MBps for 2.70GHz,
+ * 324MBps for 3.24Ghz,
+ * 540MBps for 5.40GHz
+ * 810MBps for 8.10GHz
+ */
+enum dc_link_rate {
+	LINK_RATE_UNKNOWN = 0,
+	LINK_RATE_LOW = 0x06,	/* 0x06 * 27MHz = 1.62 GHz */
+	LINK_RATE_HIGH = 0x0A,	/* 0x0A * 27MHz = 2.70 GHz */
+	LINK_RATE_RBR2 = 0x0C,	/* 0x0C * 27MHz = 3.24 GHz */
+	LINK_RATE_HIGH2 = 0x14,	/* 0x14 * 27MHz = 5.40 GHz */
+	LINK_RATE_HIGH3 = 0x1E	/* 0x1E * 27MHz = 8.10 GHz */
+};
+
+/* Downspread configuration of the main link. */
+enum dc_link_spread {
+	LINK_SPREAD_DISABLED = 0x00,
+	/* 0.5 % downspread 30 kHz */
+	LINK_SPREAD_05_DOWNSPREAD_30KHZ = 0x10,
+	/* 0.5 % downspread 33 kHz */
+	LINK_SPREAD_05_DOWNSPREAD_33KHZ = 0x11
+};
+
+/* Lane drive strength; values map directly to HW register fields. */
+enum dc_voltage_swing {
+	VOLTAGE_SWING_LEVEL0 = 0,	/* direct HW translation! */
+	VOLTAGE_SWING_LEVEL1,
+	VOLTAGE_SWING_LEVEL2,
+	VOLTAGE_SWING_LEVEL3,
+	VOLTAGE_SWING_MAX_LEVEL = VOLTAGE_SWING_LEVEL3
+};
+
+/* Lane pre-emphasis level; values map directly to HW register fields. */
+enum dc_pre_emphasis {
+	PRE_EMPHASIS_DISABLED = 0,	/* direct HW translation! */
+	PRE_EMPHASIS_LEVEL1,
+	PRE_EMPHASIS_LEVEL2,
+	PRE_EMPHASIS_LEVEL3,
+	PRE_EMPHASIS_MAX_LEVEL = PRE_EMPHASIS_LEVEL3
+};
+/* Post Cursor 2 is optional for transmitter
+ * and it applies only to the main link operating at HBR2
+ */
+enum dc_post_cursor2 {
+	POST_CURSOR2_DISABLED = 0,	/* direct HW translation! */
+	POST_CURSOR2_LEVEL1,
+	POST_CURSOR2_LEVEL2,
+	POST_CURSOR2_LEVEL3,
+	POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
+};
+
+/* Negotiated (or requested) link configuration. */
+struct dc_link_settings {
+	enum dc_lane_count lane_count;
+	enum dc_link_rate link_rate;
+	enum dc_link_spread link_spread;
+};
+
+/* Per-lane drive parameters used during link training. */
+struct dc_lane_settings {
+	enum dc_voltage_swing VOLTAGE_SWING;
+	enum dc_pre_emphasis PRE_EMPHASIS;
+	enum dc_post_cursor2 POST_CURSOR2;
+};
+
+/* Link configuration plus the per-lane settings for every lane. */
+struct dc_link_training_settings {
+	struct dc_link_settings link;
+	struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+};
+
+
+/* The unions below mirror the bit layouts of individual DPCD registers:
+ * "raw" is the whole register byte, "bits" names each field. */
+
+union dpcd_rev {
+	struct {
+		uint8_t MINOR:4;
+		uint8_t MAJOR:4;
+	} bits;
+	uint8_t raw;
+};
+
+union max_lane_count {
+	struct {
+		uint8_t MAX_LANE_COUNT:5;
+		uint8_t POST_LT_ADJ_REQ_SUPPORTED:1;
+		uint8_t TPS3_SUPPORTED:1;
+		uint8_t ENHANCED_FRAME_CAP:1;
+	} bits;
+	uint8_t raw;
+};
+
+union max_down_spread {
+	struct {
+		uint8_t MAX_DOWN_SPREAD:1;
+		uint8_t RESERVED:5;
+		uint8_t NO_AUX_HANDSHAKE_LINK_TRAINING:1;
+		uint8_t TPS4_SUPPORTED:1;
+	} bits;
+	uint8_t raw;
+};
+
+union mstm_cap {
+	struct {
+		uint8_t MST_CAP:1;
+		uint8_t RESERVED:7;
+	} bits;
+	uint8_t raw;
+};
+
+union lane_count_set {
+	struct {
+		uint8_t LANE_COUNT_SET:5;
+		uint8_t POST_LT_ADJ_REQ_GRANTED:1;
+		uint8_t RESERVED:1;
+		uint8_t ENHANCED_FRAMING:1;
+	} bits;
+	uint8_t raw;
+};
+
+/* One status byte covers two adjacent lanes (lanes 0/1 or 2/3). */
+union lane_status {
+	struct {
+		uint8_t CR_DONE_0:1;
+		uint8_t CHANNEL_EQ_DONE_0:1;
+		uint8_t SYMBOL_LOCKED_0:1;
+		uint8_t RESERVED0:1;
+		uint8_t CR_DONE_1:1;
+		uint8_t CHANNEL_EQ_DONE_1:1;
+		uint8_t SYMBOL_LOCKED_1:1;
+		uint8_t RESERVED_1:1;
+	} bits;
+	uint8_t raw;
+};
+
+union device_service_irq {
+	struct {
+		uint8_t REMOTE_CONTROL_CMD_PENDING:1;
+		uint8_t AUTOMATED_TEST:1;
+		uint8_t CP_IRQ:1;
+		uint8_t MCCS_IRQ:1;
+		uint8_t DOWN_REP_MSG_RDY:1;
+		uint8_t UP_REQ_MSG_RDY:1;
+		uint8_t SINK_SPECIFIC:1;
+		uint8_t reserved:1;
+	} bits;
+	uint8_t raw;
+};
+
+union sink_count {
+	struct {
+		uint8_t SINK_COUNT:6;
+		uint8_t CPREADY:1;
+		uint8_t RESERVED:1;
+	} bits;
+	uint8_t raw;
+};
+
+union lane_align_status_updated {
+	struct {
+		uint8_t INTERLANE_ALIGN_DONE:1;
+		uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
+		uint8_t RESERVED:4;
+		uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
+		uint8_t LINK_STATUS_UPDATED:1;
+	} bits;
+	uint8_t raw;
+};
+
+/* Swing/pre-emphasis adjustment requested by the sink for one lane. */
+union lane_adjust {
+	struct {
+		uint8_t VOLTAGE_SWING_LANE:2;
+		uint8_t PRE_EMPHASIS_LANE:2;
+		uint8_t RESERVED:4;
+	} bits;
+	uint8_t raw;
+};
+
+/* Layout differs between DPCD revisions; pick v1_4 or v1_3 accordingly. */
+union dpcd_training_pattern {
+	struct {
+		uint8_t TRAINING_PATTERN_SET:4;
+		uint8_t RECOVERED_CLOCK_OUT_EN:1;
+		uint8_t SCRAMBLING_DISABLE:1;
+		uint8_t SYMBOL_ERROR_COUNT_SEL:2;
+	} v1_4;
+	struct {
+		uint8_t TRAINING_PATTERN_SET:2;
+		uint8_t LINK_QUAL_PATTERN_SET:2;
+		uint8_t RESERVED:4;
+	} v1_3;
+	uint8_t raw;
+};
+
+/* Training Lane is used to configure downstream DP device's voltage swing
+and pre-emphasis levels*/
+/* The DPCD addresses are from 0x103 to 0x106*/
+union dpcd_training_lane {
+	struct {
+		uint8_t VOLTAGE_SWING_SET:2;
+		uint8_t MAX_SWING_REACHED:1;
+		uint8_t PRE_EMPHASIS_SET:2;
+		uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+		uint8_t RESERVED:2;
+	} bits;
+	uint8_t raw;
+};
+
+/* TMDS-converter related */
+union dwnstream_port_caps_byte0 {
+	struct {
+		uint8_t DWN_STRM_PORTX_TYPE:3;
+		uint8_t DWN_STRM_PORTX_HPD:1;
+		uint8_t RESERVERD:4;
+	} bits;
+	uint8_t raw;
+};
+
+/* these are the detailed types stored at DWN_STRM_PORTX_CAP (00080h)*/
+enum dpcd_downstream_port_detailed_type {
+	DOWN_STREAM_DETAILED_DP = 0,
+	DOWN_STREAM_DETAILED_VGA,
+	DOWN_STREAM_DETAILED_DVI,
+	DOWN_STREAM_DETAILED_HDMI,
+	DOWN_STREAM_DETAILED_NONDDC,/* has no EDID (TV,CV)*/
+	DOWN_STREAM_DETAILED_DP_PLUS_PLUS
+};
+
+union dwnstream_port_caps_byte1 {
+	struct {
+		uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
+		uint8_t RESERVED:6;
+	} bits;
+	uint8_t raw;
+};
+
+union dp_downstream_port_present {
+	uint8_t byte;
+	struct {
+		uint8_t PORT_PRESENT:1;
+		uint8_t PORT_TYPE:2;
+		uint8_t FMT_CONVERSION:1;
+		uint8_t DETAILED_CAPS:1;
+		uint8_t RESERVED:3;
+	} fields;
+};
+
+union dwnstream_port_caps_byte3_dvi {
+	struct {
+		uint8_t RESERVED1:1;
+		uint8_t DUAL_LINK:1;
+		uint8_t HIGH_COLOR_DEPTH:1;
+		uint8_t RESERVED2:5;
+	} bits;
+	uint8_t raw;
+};
+
+union dwnstream_port_caps_byte3_hdmi {
+	struct {
+		uint8_t FRAME_SEQ_TO_FRAME_PACK:1;
+		uint8_t YCrCr422_PASS_THROUGH:1;
+		uint8_t YCrCr420_PASS_THROUGH:1;
+		uint8_t YCrCr422_CONVERSION:1;
+		uint8_t YCrCr420_CONVERSION:1;
+		uint8_t RESERVED:3;
+	} bits;
+	uint8_t raw;
+};
+
+/*4-byte structure for detailed capabilities of a down-stream port
+(DP-to-TMDS converter).*/
+
+union sink_status {
+	struct {
+		uint8_t RX_PORT0_STATUS:1;
+		uint8_t RX_PORT1_STATUS:1;
+		uint8_t RESERVED:6;
+	} bits;
+	uint8_t raw;
+};
+
+/*6-byte structure corresponding to 6 registers (200h-205h)
+read during handling of HPD-IRQ*/
+union hpd_irq_data {
+	struct {
+		union sink_count sink_cnt;/* 200h */
+		union device_service_irq device_service_irq;/* 201h */
+		union lane_status lane01_status;/* 202h */
+		union lane_status lane23_status;/* 203h */
+		union lane_align_status_updated lane_status_updated;/* 204h */
+		union sink_status sink_status;
+	} bytes;
+	uint8_t raw[6];
+};
+
+union down_stream_port_count {
+	struct {
+		uint8_t DOWN_STR_PORT_COUNT:4;
+		uint8_t RESERVED:2; /*Bits 5:4 = RESERVED. Read all 0s.*/
+		/*Bit 6 = MSA_TIMING_PAR_IGNORED
+		0 = Sink device requires the MSA timing parameters
+		1 = Sink device is capable of rendering incoming video
+		 stream without MSA timing parameters*/
+		uint8_t IGNORE_MSA_TIMING_PARAM:1;
+		/*Bit 7 = OUI Support
+		0 = OUI not supported
+		1 = OUI supported
+		(OUI and Device Identification mandatory for DP 1.2)*/
+		uint8_t OUI_SUPPORT:1;
+	} bits;
+	uint8_t raw;
+};
+
+union down_spread_ctrl {
+	struct {
+		uint8_t RESERVED1:4;/* Bit 3:0 = RESERVED. Read all 0s*/
+	/* Bits 4 = SPREAD_AMP. Spreading amplitude
+	0 = Main link signal is not downspread
+	1 = Main link signal is downspread <= 0.5%
+	with frequency in the range of 30kHz ~ 33kHz*/
+		uint8_t SPREAD_AMP:1;
+		uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
+	/*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
+	0 = Source device will send valid data for the MSA Timing Params
+	1 = Source device may send invalid data for these MSA Timing Params*/
+		uint8_t IGNORE_MSA_TIMING_PARAM:1;
+	} bits;
+	uint8_t raw;
+};
+
+union dpcd_edp_config {
+	struct {
+		uint8_t PANEL_MODE_EDP:1;
+		uint8_t FRAMING_CHANGE_ENABLE:1;
+		uint8_t RESERVED:5;
+		uint8_t PANEL_SELF_TEST_ENABLE:1;
+	} bits;
+	uint8_t raw;
+};
+
+struct dp_device_vendor_id {
+	uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+	uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+};
+
+struct dp_sink_hw_fw_revision {
+	uint8_t ieee_hw_rev;
+	uint8_t ieee_fw_rev[2];
+};
+
+/*DPCD register of DP receiver capability field bits-*/
+union edp_configuration_cap {
+	struct {
+		uint8_t ALT_SCRAMBLER_RESET:1;
+		uint8_t FRAMING_CHANGE:1;
+		uint8_t RESERVED:1;
+		uint8_t DPCD_DISPLAY_CONTROL_CAPABLE:1;
+		uint8_t RESERVED2:4;
+	} bits;
+	uint8_t raw;
+};
+
+union training_aux_rd_interval {
+	struct {
+		uint8_t TRAINIG_AUX_RD_INTERVAL:7;
+		uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+	} bits;
+	uint8_t raw;
+};
+
+/* Automated test structures */
+union test_request {
+	struct {
+	uint8_t LINK_TRAINING         :1;
+	/* NOTE(review): "PATTRN" and "REAT" below look like typos for
+	 * PATTERN/READ; the names are kept because callers reference them. */
+	uint8_t LINK_TEST_PATTRN      :1;
+	uint8_t EDID_REAT             :1;
+	uint8_t PHY_TEST_PATTERN      :1;
+	uint8_t AUDIO_TEST_PATTERN    :1;
+	uint8_t RESERVED              :1;
+	uint8_t TEST_STEREO_3D        :1;
+	} bits;
+	uint8_t raw;
+};
+
+union test_response {
+	struct {
+		uint8_t ACK         :1;
+		uint8_t NO_ACK      :1;
+		uint8_t RESERVED    :6;
+	} bits;
+	uint8_t raw;
+};
+
+union phy_test_pattern {
+	struct {
+		/* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
+		 * and 3 bits for DP1.2.
+		 */
+		uint8_t PATTERN     :3;
+		/* By spec, bits 7:2 are 0 for DP1.1. */
+		uint8_t RESERVED    :5;
+	} bits;
+	uint8_t raw;
+};
+
+/* States of Compliance Test Specification (CTS DP1.2). */
+union compliance_test_state {
+	struct {
+		unsigned char STEREO_3D_RUNNING        : 1;
+		unsigned char RESERVED                 : 7;
+	} bits;
+	unsigned char raw;
+};
+
+union link_test_pattern {
+	struct {
+		/* dpcd_link_test_patterns */
+		unsigned char PATTERN :2;
+		unsigned char RESERVED:6;
+	} bits;
+	unsigned char raw;
+};
+
+union test_misc {
+	struct dpcd_test_misc_bits {
+		unsigned char SYNC_CLOCK :1;
+		/* dpcd_test_color_format */
+		unsigned char CLR_FORMAT :2;
+		/* dpcd_test_dyn_range */
+		unsigned char DYN_RANGE  :1;
+		unsigned char YCBCR      :1;
+		/* dpcd_test_bit_depth */
+		unsigned char BPC        :3;
+	} bits;
+	unsigned char raw;
+};
+
+#endif /* DC_DP_TYPES_H */

+ 171 - 0
drivers/gpu/drm/amd/display/dc/dc_helper.c

@@ -0,0 +1,171 @@
+/*
+ * dc_helper.c
+ *
+ *  Created on: Aug 30, 2016
+ *      Author: agrodzov
+ */
+#include "dm_services.h"
+#include <stdarg.h>
+
+/* Apply @n (shift, mask, value) field updates to @reg_val, write the
+ * result to register @addr, and return the written value.
+ *
+ * The first triple is passed explicitly; the remaining n - 1 triples
+ * are consumed from the variadic list in the same order.
+ */
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+		uint32_t addr, uint32_t reg_val, int n,
+		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+		...)
+{
+	va_list args;
+	int remaining;
+
+	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+	va_start(args, field_value1);
+	for (remaining = n - 1; remaining > 0; remaining--) {
+		uint32_t shift = va_arg(args, uint32_t);
+		uint32_t mask = va_arg(args, uint32_t);
+		uint32_t field_value = va_arg(args, uint32_t);
+
+		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+	}
+	va_end(args);
+
+	dm_write_reg(ctx, addr, reg_val);
+
+	return reg_val;
+}
+
+/* Read register @addr once, decode the (@mask, @shift) field into
+ * *@field_value, and return the raw register value. */
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift, uint32_t mask, uint32_t *field_value)
+{
+	const uint32_t value = dm_read_reg(ctx, addr);
+
+	*field_value = get_reg_field_value_ex(value, mask, shift);
+
+	return value;
+}
+
+/* Read @addr once and decode two fields from the single read value.
+ * Returns the full register value. */
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	return reg_val;
+}
+
+/* Same as generic_reg_get2, but decodes three fields. */
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	return reg_val;
+}
+
+/* Same as generic_reg_get2, but decodes four fields. */
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	return reg_val;
+}
+
+/* Same as generic_reg_get2, but decodes five fields. */
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	return reg_val;
+}
+
+/* note: a variadic version of this is a pretty bad idea, since there is an
+ * output parameter passed by pointer; the compiler cannot check for size
+ * mismatches, and such code is prone to stack-corruption bugs.
+
+uint32_t generic_reg_get(const struct dc_context *ctx,
+		uint32_t addr, int n, ...)
+{
+	uint32_t shift, mask;
+	uint32_t *field_value;
+	uint32_t reg_val;
+	int i = 0;
+
+	reg_val = dm_read_reg(ctx, addr);
+
+	va_list ap;
+	va_start(ap, n);
+
+	while (i < n) {
+		shift = va_arg(ap, uint32_t);
+		mask = va_arg(ap, uint32_t);
+		field_value = va_arg(ap, uint32_t *);
+
+		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
+		i++;
+	}
+
+	va_end(ap);
+
+	return reg_val;
+}
+*/
+
+/* Poll register @addr until the (@mask, @shift) field equals
+ * @condition_value, sleeping @delay_between_poll_us between polls for up
+ * to @time_out_num_tries retries (plus one immediate read before the
+ * first delay).  Returns the last value read.  On timeout, logs an
+ * error tagged with @func_name/@line and breaks to the debugger on
+ * non-FPGA environments.
+ */
+uint32_t generic_reg_wait(const struct dc_context *ctx,
+	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
+	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+	const char *func_name, int line)
+{
+	uint32_t field_value;
+	uint32_t reg_val;
+	unsigned int i;	/* unsigned: compared against unsigned bound below */
+
+	/* something is terribly wrong if time out is > 200ms. (5Hz) */
+	ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
+
+	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+		/* 35 seconds */
+		delay_between_poll_us = 35000;
+		time_out_num_tries = 1000;
+	}
+
+	/* "<=" is intentional: iteration 0 reads immediately (no delay),
+	 * then up to time_out_num_tries delayed re-reads follow. */
+	for (i = 0; i <= time_out_num_tries; i++) {
+		if (i) {
+			if (delay_between_poll_us >= 1000)
+				msleep(delay_between_poll_us/1000);
+			else if (delay_between_poll_us > 0)
+				udelay(delay_between_poll_us);
+		}
+
+		reg_val = dm_read_reg(ctx, addr);
+
+		field_value = get_reg_field_value_ex(reg_val, mask, shift);
+
+		if (field_value == condition_value)
+			return reg_val;
+	}
+
+	/* %u: both counters are unsigned int (was %d, a specifier mismatch) */
+	dm_error("REG_WAIT timeout %uus * %u tries - %s line:%d\n",
+			delay_between_poll_us, time_out_num_tries,
+			func_name, line);
+
+	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+		BREAK_TO_DEBUGGER();
+
+	return reg_val;
+}

+ 706 - 0
drivers/gpu/drm/amd/display/dc/dc_hw_types.h

@@ -0,0 +1,706 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HW_TYPES_H
+#define DC_HW_TYPES_H
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "signal_types.h"
+
+/******************************************************************************
+ * Data types for Virtual HW Layer of DAL3.
+ * (see DAL3 design documents for HW Layer definition)
+ *
+ * The intended uses are:
+ * 1. Generating pseudocode sequences for HW programming.
+ * 2. Implementation of real HW programming by HW Sequencer of DAL3.
+ *
+ * Note: do *not* add any types which are *not* used for HW programming - this
+ * will ensure separation of Logic layer from HW layer.
+ ******************************************************************************/
+
+/* 64-bit value accessible as quad_part, or as 32-bit low/high halves
+ * (anonymously or through the named member "u"). */
+union large_integer {
+	struct {
+		uint32_t low_part;
+		int32_t high_part;
+	};
+
+	struct {
+		uint32_t low_part;
+		int32_t high_part;
+	} u;
+
+	int64_t quad_part;
+};
+
+#define PHYSICAL_ADDRESS_LOC union large_integer
+
+/* Selects which member of the union in struct dc_plane_address is valid. */
+enum dc_plane_addr_type {
+	PLN_ADDR_TYPE_GRAPHICS = 0,
+	PLN_ADDR_TYPE_GRPH_STEREO,
+	PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+};
+
+/* Surface (and compression metadata) addresses for one plane. */
+struct dc_plane_address {
+	enum dc_plane_addr_type type;
+	/* NOTE(review): presumably marks a TMZ/protected-memory surface --
+	 * confirm against the users of this field. */
+	bool tmz_surface;
+	union {
+		struct{
+			PHYSICAL_ADDRESS_LOC addr;
+			PHYSICAL_ADDRESS_LOC meta_addr;
+			union large_integer dcc_const_color;
+		} grph;
+
+		/*stereo*/
+		struct {
+			PHYSICAL_ADDRESS_LOC left_addr;
+			PHYSICAL_ADDRESS_LOC left_meta_addr;
+			union large_integer left_dcc_const_color;
+
+			PHYSICAL_ADDRESS_LOC right_addr;
+			PHYSICAL_ADDRESS_LOC right_meta_addr;
+			union large_integer right_dcc_const_color;
+
+		} grph_stereo;
+
+		/*video  progressive*/
+		struct {
+			PHYSICAL_ADDRESS_LOC luma_addr;
+			PHYSICAL_ADDRESS_LOC luma_meta_addr;
+			union large_integer luma_dcc_const_color;
+
+			PHYSICAL_ADDRESS_LOC chroma_addr;
+			PHYSICAL_ADDRESS_LOC chroma_meta_addr;
+			union large_integer chroma_dcc_const_color;
+		} video_progressive;
+	};
+};
+
+/* Width/height pair in pixels. */
+struct dc_size {
+	int width;
+	int height;
+};
+
+/* Rectangle: top-left corner plus dimensions, in pixels. */
+struct rect {
+	int x;
+	int y;
+	int width;
+	int height;
+};
+
+union plane_size {
+	/* Grph or Video will be selected
+	 * based on format above:
+	 * Use Video structure if
+	 * format >= DalPixelFormat_VideoBegin
+	 * else use Grph structure
+	 */
+	struct {
+		struct rect surface_size;
+		/* Graphic surface pitch in pixels.
+		 * In LINEAR_GENERAL mode, pitch
+		 * is 32 pixel aligned.
+		 */
+		int surface_pitch;
+	} grph;
+
+	struct {
+		struct rect luma_size;
+		/* Graphic surface pitch in pixels.
+		 * In LINEAR_GENERAL mode, pitch is
+		 * 32 pixel aligned.
+		 */
+		int luma_pitch;
+
+		struct rect chroma_size;
+		/* Graphic surface pitch in pixels.
+		 * In LINEAR_GENERAL mode, pitch is
+		 * 32 pixel aligned.
+		 */
+		int chroma_pitch;
+	} video;
+};
+
+/* DCC (color compression) parameters for a plane's surfaces. */
+struct dc_plane_dcc_param {
+	bool enable;
+
+	union {
+		struct {
+			int meta_pitch;
+			bool independent_64b_blks;
+		} grph;
+
+		struct {
+			int meta_pitch_l;
+			bool independent_64b_blks_l;
+
+			int meta_pitch_c;
+			bool independent_64b_blks_c;
+		} video;
+	};
+};
+
+/*Displayable pixel format in fb*/
+enum surface_pixel_format {
+	SURFACE_PIXEL_FORMAT_GRPH_BEGIN = 0,
+	/*TO BE REMOVED: palette, 256 colors*/
+	SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS =
+		SURFACE_PIXEL_FORMAT_GRPH_BEGIN,
+	/*16 bpp*/
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB1555,
+	/*16 bpp*/
+	SURFACE_PIXEL_FORMAT_GRPH_RGB565,
+	/*32 bpp*/
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
+	/*32 bpp, swapped channel order*/
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR8888,
+
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
+	/*swapped*/
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010,
+	/*TO BE REMOVED: swapped; XR_BIAS has no difference
+	 * in pixel layout from the previous format and we can
+	 * delete this after discussion*/
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
+	/*64 bpp */
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+	/*float*/
+	SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
+	/*swapped & float*/
+	SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
+	/*grow graphics here if necessary */
+
+	SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+	SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
+		SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+	SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
+	SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
+	SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+	SURFACE_PIXEL_FORMAT_INVALID
+
+	/*grow 444 video here if necessary */
+};
+
+
+
+/* Pixel format */
+enum pixel_format {
+	/*graph*/
+	PIXEL_FORMAT_UNINITIALIZED,
+	PIXEL_FORMAT_INDEX8,
+	PIXEL_FORMAT_RGB565,
+	PIXEL_FORMAT_ARGB8888,
+	PIXEL_FORMAT_ARGB2101010,
+	PIXEL_FORMAT_ARGB2101010_XRBIAS,
+	PIXEL_FORMAT_FP16,
+	/*video*/
+	PIXEL_FORMAT_420BPP8,
+	PIXEL_FORMAT_420BPP10,
+	/*end of pixel format definition*/
+	PIXEL_FORMAT_INVALID,
+
+	/* range aliases: [GRPH_BEGIN, GRPH_END] and [VIDEO_BEGIN, VIDEO_END] */
+	PIXEL_FORMAT_GRPH_BEGIN = PIXEL_FORMAT_INDEX8,
+	PIXEL_FORMAT_GRPH_END = PIXEL_FORMAT_FP16,
+	PIXEL_FORMAT_VIDEO_BEGIN = PIXEL_FORMAT_420BPP8,
+	PIXEL_FORMAT_VIDEO_END = PIXEL_FORMAT_420BPP10,
+	PIXEL_FORMAT_UNKNOWN
+};
+
+/* Values match the HW register encoding (see union dc_tiling_info docs). */
+enum tile_split_values {
+	DC_DISPLAY_MICRO_TILING = 0x0,
+	DC_THIN_MICRO_TILING = 0x1,
+	DC_DEPTH_MICRO_TILING = 0x2,
+	DC_ROTATED_MICRO_TILING = 0x3,
+};
+
+/* TODO: These values come from hardware spec. We need to readdress this
+ * if they ever change.
+ */
+enum array_mode_values {
+	DC_ARRAY_LINEAR_GENERAL = 0,
+	DC_ARRAY_LINEAR_ALLIGNED,
+	DC_ARRAY_1D_TILED_THIN1,
+	DC_ARRAY_1D_TILED_THICK,
+	DC_ARRAY_2D_TILED_THIN1,
+	DC_ARRAY_PRT_TILED_THIN1,
+	DC_ARRAY_PRT_2D_TILED_THIN1,
+	DC_ARRAY_2D_TILED_THICK,
+	DC_ARRAY_2D_TILED_X_THICK,
+	DC_ARRAY_PRT_TILED_THICK,
+	DC_ARRAY_PRT_2D_TILED_THICK,
+	DC_ARRAY_PRT_3D_TILED_THIN1,
+	DC_ARRAY_3D_TILED_THIN1,
+	DC_ARRAY_3D_TILED_THICK,
+	DC_ARRAY_3D_TILED_X_THICK,
+	DC_ARRAY_PRT_3D_TILED_THICK,
+};
+
+enum tile_mode_values {
+	DC_ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+	DC_ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+};
+
+/* Swizzle modes used by the gfx9 tiling info; values match HW encoding. */
+enum swizzle_mode_values {
+	DC_SW_LINEAR = 0,
+	DC_SW_256B_S = 1,
+	DC_SW_256_D = 2,
+	DC_SW_256_R = 3,
+	DC_SW_4KB_S = 5,
+	DC_SW_4KB_D = 6,
+	DC_SW_4KB_R = 7,
+	DC_SW_64KB_S = 9,
+	DC_SW_64KB_D = 10,
+	DC_SW_64KB_R = 11,
+	DC_SW_VAR_S = 13,
+	DC_SW_VAR_D = 14,
+	DC_SW_VAR_R = 15,
+	DC_SW_64KB_S_T = 17,
+	DC_SW_64KB_D_T = 18,
+	DC_SW_4KB_S_X = 21,
+	DC_SW_4KB_D_X = 22,
+	DC_SW_4KB_R_X = 23,
+	DC_SW_64KB_S_X = 25,
+	DC_SW_64KB_D_X = 26,
+	DC_SW_64KB_R_X = 27,
+	DC_SW_VAR_S_X = 29,
+	DC_SW_VAR_D_X = 30,
+	DC_SW_VAR_R_X = 31,
+	DC_SW_MAX
+};
+
+/* Tiling parameters; use the member matching the ASIC's GFX generation. */
+union dc_tiling_info {
+
+	struct {
+		/* Specifies the number of memory banks for tiling
+		 *	purposes.
+		 * Only applies to 2D and 3D tiling modes.
+		 *	POSSIBLE VALUES: 2,4,8,16
+		 */
+		unsigned int num_banks;
+		/* Specifies the number of tiles in the x direction
+		 *	to be incorporated into the same bank.
+		 * Only applies to 2D and 3D tiling modes.
+		 *	POSSIBLE VALUES: 1,2,4,8
+		 */
+		unsigned int bank_width;
+		unsigned int bank_width_c;
+		/* Specifies the number of tiles in the y direction to
+		 *	be incorporated into the same bank.
+		 * Only applies to 2D and 3D tiling modes.
+		 *	POSSIBLE VALUES: 1,2,4,8
+		 */
+		unsigned int bank_height;
+		unsigned int bank_height_c;
+		/* Specifies the macro tile aspect ratio. Only applies
+		 * to 2D and 3D tiling modes.
+		 */
+		unsigned int tile_aspect;
+		unsigned int tile_aspect_c;
+		/* Specifies the number of bytes that will be stored
+		 *	contiguously for each tile.
+		 * If the tile data requires more storage than this
+		 *	amount, it is split into multiple slices.
+		 * This field must not be larger than
+		 *	GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+		 * Only applies to 2D and 3D tiling modes.
+		 * For color render targets, TILE_SPLIT >= 256B.
+		 */
+		enum tile_split_values tile_split;
+		enum tile_split_values tile_split_c;
+		/* Specifies the addressing within a tile.
+		 *	0x0 - DISPLAY_MICRO_TILING
+		 *	0x1 - THIN_MICRO_TILING
+		 *	0x2 - DEPTH_MICRO_TILING
+		 *	0x3 - ROTATED_MICRO_TILING
+		 */
+		enum tile_mode_values tile_mode;
+		enum tile_mode_values tile_mode_c;
+		/* Specifies the number of pipes and how they are
+		 *	interleaved in the surface.
+		 * Refer to memory addressing document for complete
+		 *	details and constraints.
+		 */
+		unsigned int pipe_config;
+		/* Specifies the tiling mode of the surface.
+		 * THIN tiles use an 8x8x1 tile size.
+		 * THICK tiles use an 8x8x4 tile size.
+		 * 2D tiling modes rotate banks for successive Z slices
+		 * 3D tiling modes rotate pipes and banks for Z slices
+		 * Refer to memory addressing document for complete
+		 *	details and constraints.
+		 */
+		enum array_mode_values array_mode;
+	} gfx8;
+
+	struct {
+		unsigned int num_pipes;
+		unsigned int num_banks;
+		unsigned int pipe_interleave;
+		unsigned int num_shader_engines;
+		unsigned int num_rb_per_se;
+		unsigned int max_compressed_frags;
+		bool shaderEnable;
+
+		enum swizzle_mode_values swizzle;
+		bool meta_linear;
+		bool rb_aligned;
+		bool pipe_aligned;
+	} gfx9;
+};
+
+/* Rotation angle */
+enum dc_rotation_angle {
+	ROTATION_ANGLE_0 = 0,
+	ROTATION_ANGLE_90,
+	ROTATION_ANGLE_180,
+	ROTATION_ANGLE_270,
+	ROTATION_ANGLE_COUNT
+};
+
+enum dc_scan_direction {
+	SCAN_DIRECTION_UNKNOWN = 0,
+	SCAN_DIRECTION_HORIZONTAL = 1,  /* 0, 180 rotation */
+	SCAN_DIRECTION_VERTICAL = 2,    /* 90, 270 rotation */
+};
+
+/* Cursor position in pixels, plus the hotspot offset within the cursor. */
+struct dc_cursor_position {
+	uint32_t x;
+	uint32_t y;
+
+	uint32_t x_hotspot;
+	uint32_t y_hotspot;
+
+	/*
+	 * This parameter indicates whether HW cursor should be enabled
+	 */
+	bool enable;
+
+};
+
+/* NOTE(review): "mi" presumably stands for memory interface; clock and
+ * viewport inputs used when programming the cursor -- confirm with callers. */
+struct dc_cursor_mi_param {
+	unsigned int pixel_clk_khz;
+	unsigned int ref_clk_khz;
+	unsigned int viewport_x_start;
+	unsigned int viewport_width;
+	struct fixed31_32 h_scale_ratio;
+};
+
+/* IPP related types */
+
+/* Gamma LUT sizing constants. */
+enum {
+	GAMMA_RGB_256_ENTRIES = 256,
+	GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
+	GAMMA_MAX_ENTRIES = 1024
+};
+
+enum dc_gamma_type {
+	GAMMA_RGB_256 = 1,
+	GAMMA_RGB_FLOAT_1024 = 2
+};
+
+/* Reference-counted gamma LUT; the first num_entries of each channel
+ * array are valid. */
+struct dc_gamma {
+	struct kref refcount;	/* freed when the last reference drops */
+	enum dc_gamma_type type;
+	unsigned int num_entries;
+
+	struct dc_gamma_entries {
+		struct fixed31_32 red[GAMMA_MAX_ENTRIES];
+		struct fixed31_32 green[GAMMA_MAX_ENTRIES];
+		struct fixed31_32 blue[GAMMA_MAX_ENTRIES];
+	} entries;
+
+	/* private to DC core */
+	struct dc_context *ctx;
+};
+
+/* Used by both ipp and opp functions*/
+/* TODO: to be consolidated with enum color_space */
+
+/*
+ * This enum is for programming CURSOR_MODE register field. What this register
+ * should be programmed to depends on OS requested cursor shape flags and what
+ * we stored in the cursor surface.
+ */
+enum dc_cursor_color_format {
+	CURSOR_MODE_MONO,
+	CURSOR_MODE_COLOR_1BIT_AND,
+	CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
+	CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
+};
+
+/*
+ * This is all the parameters required by DAL in order to update the cursor
+ * attributes, including the new cursor image surface address, size, hotspot
+ * location, color format, etc.
+ */
+
+union dc_cursor_attribute_flags {
+	struct {
+		uint32_t ENABLE_MAGNIFICATION:1;
+		uint32_t INVERSE_TRANSPARENT_CLAMPING:1;
+		uint32_t HORIZONTAL_MIRROR:1;
+		uint32_t VERTICAL_MIRROR:1;
+		uint32_t INVERT_PIXEL_DATA:1;
+		uint32_t ZERO_EXPANSION:1;
+		uint32_t MIN_MAX_INVERT:1;
+		uint32_t RESERVED:25;
+	} bits;
+	uint32_t value;
+};
+
+struct dc_cursor_attributes {
+	PHYSICAL_ADDRESS_LOC address;
+	uint32_t pitch;
+
+	/* Width and height should correspond to cursor surface width x height */
+	uint32_t width;
+	uint32_t height;
+
+	enum dc_cursor_color_format color_format;
+
+	/* In case we support HW Cursor rotation in the future */
+	enum dc_rotation_angle rotation_angle;
+
+	union dc_cursor_attribute_flags attribute_flags;
+};
+
+/* OPP */
+
+enum dc_color_space {
+	COLOR_SPACE_UNKNOWN,
+	COLOR_SPACE_SRGB,
+	COLOR_SPACE_SRGB_LIMITED,
+	COLOR_SPACE_YCBCR601,
+	COLOR_SPACE_YCBCR709,
+	COLOR_SPACE_YCBCR601_LIMITED,
+	COLOR_SPACE_YCBCR709_LIMITED,
+	COLOR_SPACE_2020_RGB_FULLRANGE,
+	COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+	COLOR_SPACE_2020_YCBCR,
+	COLOR_SPACE_ADOBERGB,
+};
+
+/* Dithering method applied on output; FM = frame modulation,
+ * TRUN = truncation, SPATIAL = spatial dithering. */
+enum dc_dither_option {
+	DITHER_OPTION_DEFAULT,
+	DITHER_OPTION_DISABLE,
+	DITHER_OPTION_FM6,
+	DITHER_OPTION_FM8,
+	DITHER_OPTION_FM10,
+	DITHER_OPTION_SPATIAL6_FRAME_RANDOM,
+	DITHER_OPTION_SPATIAL8_FRAME_RANDOM,
+	DITHER_OPTION_SPATIAL10_FRAME_RANDOM,
+	DITHER_OPTION_SPATIAL6,
+	DITHER_OPTION_SPATIAL8,
+	DITHER_OPTION_SPATIAL10,
+	DITHER_OPTION_TRUN6,
+	DITHER_OPTION_TRUN8,
+	DITHER_OPTION_TRUN10,
+	DITHER_OPTION_TRUN10_SPATIAL8,
+	DITHER_OPTION_TRUN10_SPATIAL6,
+	DITHER_OPTION_TRUN10_FM8,
+	DITHER_OPTION_TRUN10_FM6,
+	DITHER_OPTION_TRUN10_SPATIAL8_FM6,
+	DITHER_OPTION_SPATIAL10_FM8,
+	DITHER_OPTION_SPATIAL10_FM6,
+	DITHER_OPTION_TRUN8_SPATIAL6,
+	DITHER_OPTION_TRUN8_FM6,
+	DITHER_OPTION_SPATIAL8_FM6,
+	DITHER_OPTION_MAX = DITHER_OPTION_SPATIAL8_FM6,
+	DITHER_OPTION_INVALID
+};
+
+enum dc_quantization_range {
+	QUANTIZATION_RANGE_UNKNOWN,
+	QUANTIZATION_RANGE_FULL,
+	QUANTIZATION_RANGE_LIMITED
+};
+
+/* XFM */
+
+/* used in struct dc_plane_state */
+struct scaling_taps {
+	uint32_t v_taps;
+	uint32_t h_taps;
+	uint32_t v_taps_c;
+	uint32_t h_taps_c;
+};
+
+enum dc_timing_standard {
+	TIMING_STANDARD_UNDEFINED,
+	TIMING_STANDARD_DMT,
+	TIMING_STANDARD_GTF,
+	TIMING_STANDARD_CVT,
+	TIMING_STANDARD_CVT_RB,
+	TIMING_STANDARD_CEA770,
+	TIMING_STANDARD_CEA861,
+	TIMING_STANDARD_HDMI,
+	TIMING_STANDARD_TV_NTSC,
+	TIMING_STANDARD_TV_NTSC_J,
+	TIMING_STANDARD_TV_PAL,
+	TIMING_STANDARD_TV_PAL_M,
+	TIMING_STANDARD_TV_PAL_CN,
+	TIMING_STANDARD_TV_SECAM,
+	TIMING_STANDARD_EXPLICIT,
+	/*!< For explicit timings from EDID, VBIOS, etc.*/
+	TIMING_STANDARD_USER_OVERRIDE,
+	/*!< For mode timing override by user*/
+	TIMING_STANDARD_MAX
+};
+
+
+
+/* Bits per color component. */
+enum dc_color_depth {
+	COLOR_DEPTH_UNDEFINED,
+	COLOR_DEPTH_666,
+	COLOR_DEPTH_888,
+	COLOR_DEPTH_101010,
+	COLOR_DEPTH_121212,
+	COLOR_DEPTH_141414,
+	COLOR_DEPTH_161616,
+	COLOR_DEPTH_COUNT
+};
+
+enum dc_pixel_encoding {
+	PIXEL_ENCODING_UNDEFINED,
+	PIXEL_ENCODING_RGB,
+	PIXEL_ENCODING_YCBCR422,
+	PIXEL_ENCODING_YCBCR444,
+	PIXEL_ENCODING_YCBCR420,
+	PIXEL_ENCODING_COUNT
+};
+
+enum dc_aspect_ratio {
+	ASPECT_RATIO_NO_DATA,
+	ASPECT_RATIO_4_3,
+	ASPECT_RATIO_16_9,
+	ASPECT_RATIO_64_27,
+	ASPECT_RATIO_256_135,
+	ASPECT_RATIO_FUTURE
+};
+
+enum scanning_type {
+	SCANNING_TYPE_NODATA = 0,
+	SCANNING_TYPE_OVERSCAN,
+	SCANNING_TYPE_UNDERSCAN,
+	SCANNING_TYPE_FUTURE,
+	SCANNING_TYPE_UNDEFINED
+};
+
+/* Modifier flags for struct dc_crtc_timing. */
+struct dc_crtc_timing_flags {
+	uint32_t INTERLACE :1;
+	uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+	 it is positive polarity --reversed with dal1 or video bios define*/
+	uint32_t VSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+	 it is positive polarity --reversed with dal1 or video bios define*/
+
+	uint32_t HORZ_COUNT_BY_TWO:1;
+
+	uint32_t EXCLUSIVE_3D :1; /* if this bit set,
+	 timing can be driven in 3D format only
+	 and there is no corresponding 2D timing*/
+	uint32_t RIGHT_EYE_3D_POLARITY :1; /* 1 - means right eye polarity
+	 (right eye = '1', left eye = '0') */
+	uint32_t SUB_SAMPLE_3D :1; /* 1 - means left/right  images subsampled
+	 when mixed into 3D image. 0 - means summation (3D timing is doubled)*/
+	uint32_t USE_IN_3D_VIEW_ONLY :1; /* Do not use this timing in 2D View,
+	 because corresponding 2D timing also present in the list*/
+	uint32_t STEREO_3D_PREFERENCE :1; /* Means this is 2D timing
+	 and we want to match priority of corresponding 3D timing*/
+	uint32_t Y_ONLY :1;
+
+	uint32_t YCBCR420 :1; /* TODO: shouldn't need this flag, should be a separate pixel format */
+	uint32_t DTD_COUNTER :5; /* values 1 to 16 */
+
+	uint32_t FORCE_HDR :1;
+
+	/* HDMI 2.0 - Support scrambling for TMDS character
+	 * rates less than or equal to 340Mcsc */
+	uint32_t LTE_340MCSC_SCRAMBLE:1;
+
+};
+
+enum dc_timing_3d_format {
+	TIMING_3D_FORMAT_NONE,
+	TIMING_3D_FORMAT_FRAME_ALTERNATE, /* No stereosync at all*/
+	TIMING_3D_FORMAT_INBAND_FA, /* Inband Frame Alternate (DVI/DP)*/
+	TIMING_3D_FORMAT_DP_HDMI_INBAND_FA, /* Inband FA to HDMI Frame Pack*/
+	/* for active DP-HDMI dongle*/
+	TIMING_3D_FORMAT_SIDEBAND_FA, /* Sideband Frame Alternate (eDP)*/
+	TIMING_3D_FORMAT_HW_FRAME_PACKING,
+	TIMING_3D_FORMAT_SW_FRAME_PACKING,
+	TIMING_3D_FORMAT_ROW_INTERLEAVE,
+	TIMING_3D_FORMAT_COLUMN_INTERLEAVE,
+	TIMING_3D_FORMAT_PIXEL_INTERLEAVE,
+	TIMING_3D_FORMAT_SIDE_BY_SIDE,
+	TIMING_3D_FORMAT_TOP_AND_BOTTOM,
+	TIMING_3D_FORMAT_SBS_SW_PACKED,
+	/* Side-by-side, packed by application/driver into 2D frame*/
+	TIMING_3D_FORMAT_TB_SW_PACKED,
+	/* Top-and-bottom, packed by application/driver into 2D frame*/
+
+	TIMING_3D_FORMAT_MAX,
+};
+
+
+/* CRTC timing in pixels/lines; see dc_crtc_timing_flags for modifiers. */
+struct dc_crtc_timing {
+
+	uint32_t h_total;
+	uint32_t h_border_left;
+	uint32_t h_addressable;
+	uint32_t h_border_right;
+	uint32_t h_front_porch;
+	uint32_t h_sync_width;
+
+	uint32_t v_total;
+	uint32_t v_border_top;
+	uint32_t v_addressable;
+	uint32_t v_border_bottom;
+	uint32_t v_front_porch;
+	uint32_t v_sync_width;
+
+	uint32_t pix_clk_khz;
+
+	uint32_t vic;
+	uint32_t hdmi_vic;
+	enum dc_timing_3d_format timing_3d_format;
+	enum dc_color_depth display_color_depth;
+	enum dc_pixel_encoding pixel_encoding;
+	enum dc_aspect_ratio aspect_ratio;
+	enum scanning_type scan_type;
+
+	struct dc_crtc_timing_flags flags;
+};
+
+#define MAX_TG_COLOR_VALUE 0x3FF
+struct tg_color {
+	/* Maximum 10 bits color value */
+	uint16_t color_r_cr;
+	uint16_t color_g_y;
+	uint16_t color_b_cb;
+};
+
+#endif /* DC_HW_TYPES_H */
+

+ 652 - 0
drivers/gpu/drm/amd/display/dc/dc_types.h

@@ -0,0 +1,652 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_TYPES_H_
+#define DC_TYPES_H_
+
+#include "fixed32_32.h"
+#include "fixed31_32.h"
+#include "irq_types.h"
+#include "dc_dp_types.h"
+#include "dc_hw_types.h"
+#include "dal_types.h"
+#include "grph_object_defs.h"
+
+/* forward declarations */
+struct dc_plane_state;
+struct dc_stream_state;
+struct dc_link;
+struct dc_sink;
+struct dal;
+
+/********************************
+ * Environment definitions
+ ********************************/
+enum dce_environment {
+	DCE_ENV_PRODUCTION_DRV = 0,
+	/* Emulation on FPGA, in "Maximus" System.
+	 * This environment enforces that *only* DC registers accessed.
+	 * (access to non-DC registers will hang FPGA) */
+	DCE_ENV_FPGA_MAXIMUS,
+	/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
+	 * requirements of Diagnostics team. */
+	DCE_ENV_DIAG
+};
+
+/* Note: use these macro definitions instead of direct comparison! */
+#define IS_FPGA_MAXIMUS_DC(dce_environment) \
+	(dce_environment == DCE_ENV_FPGA_MAXIMUS)
+
+#define IS_DIAG_DC(dce_environment) \
+	(IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
+
+struct hw_asic_id {
+	uint32_t chip_id;
+	uint32_t chip_family;
+	uint32_t pci_revision_id;
+	uint32_t hw_internal_rev;
+	uint32_t vram_type;
+	uint32_t vram_width;
+	uint32_t feature_flags;
+	uint32_t fake_paths_num;
+	void *atombios_base_address;
+};
+
+struct dc_context {
+	struct dc *dc;
+
+	void *driver_context; /* e.g. amdgpu_device */
+
+	struct dal_logger *logger;
+	void *cgs_device;
+
+	enum dce_environment dce_environment;
+	struct hw_asic_id asic_id;
+
+	/* todo: below should probably move to dc.  to facilitate removal
+	 * of AS we will store these here
+	 */
+	enum dce_version dce_version;
+	struct dc_bios *dc_bios;
+	bool created_bios;
+	struct gpio_service *gpio_service;
+	struct i2caux *i2caux;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	uint64_t fbc_gpu_addr;
+#endif
+};
+
+
+#define MAX_EDID_BUFFER_SIZE 512
+#define EDID_BLOCK_SIZE 128
+#define MAX_SURFACE_NUM 4
+#define NUM_PIXEL_FORMATS 10
+
+#include "dc_ddc_types.h"
+
+enum tiling_mode {
+	TILING_MODE_INVALID,
+	TILING_MODE_LINEAR,
+	TILING_MODE_TILED,
+	TILING_MODE_COUNT
+};
+
+enum view_3d_format {
+	VIEW_3D_FORMAT_NONE = 0,
+	VIEW_3D_FORMAT_FRAME_SEQUENTIAL,
+	VIEW_3D_FORMAT_SIDE_BY_SIDE,
+	VIEW_3D_FORMAT_TOP_AND_BOTTOM,
+	VIEW_3D_FORMAT_COUNT,
+	VIEW_3D_FORMAT_FIRST = VIEW_3D_FORMAT_FRAME_SEQUENTIAL
+};
+
+enum plane_stereo_format {
+	PLANE_STEREO_FORMAT_NONE = 0,
+	PLANE_STEREO_FORMAT_SIDE_BY_SIDE = 1,
+	PLANE_STEREO_FORMAT_TOP_AND_BOTTOM = 2,
+	PLANE_STEREO_FORMAT_FRAME_ALTERNATE = 3,
+	PLANE_STEREO_FORMAT_ROW_INTERLEAVED = 5,
+	PLANE_STEREO_FORMAT_COLUMN_INTERLEAVED = 6,
+	PLANE_STEREO_FORMAT_CHECKER_BOARD = 7
+};
+
+/* TODO: Find way to calculate number of bits
+ *  Please increase if pixel_format enum increases
+ * num  from  PIXEL_FORMAT_INDEX8 to PIXEL_FORMAT_444BPP32
+ */
+
+enum dc_edid_connector_type {
+	EDID_CONNECTOR_UNKNOWN = 0,
+	EDID_CONNECTOR_ANALOG = 1,
+	EDID_CONNECTOR_DIGITAL = 10,
+	EDID_CONNECTOR_DVI = 11,
+	EDID_CONNECTOR_HDMIA = 12,
+	EDID_CONNECTOR_MDDI = 14,
+	EDID_CONNECTOR_DISPLAYPORT = 15
+};
+
+enum dc_edid_status {
+	EDID_OK,
+	EDID_BAD_INPUT,
+	EDID_NO_RESPONSE,
+	EDID_BAD_CHECKSUM,
+	EDID_THE_SAME,
+};
+
+/* audio capability from EDID*/
+struct dc_cea_audio_mode {
+	uint8_t format_code; /* ucData[0] [6:3]*/
+	uint8_t channel_count; /* ucData[0] [2:0]*/
+	uint8_t sample_rate; /* ucData[1]*/
+	union {
+		uint8_t sample_size; /* for LPCM*/
+		/*  for Audio Formats 2-8 (Max bit rate divided by 8 kHz)*/
+		uint8_t max_bit_rate;
+		uint8_t audio_codec_vendor_specific; /* for Audio Formats 9-15*/
+	};
+};
+
+struct dc_edid {
+	uint32_t length;
+	uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+};
+
+/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
+ * is used. In this case we assume speaker location are: front left, front
+ * right and front center. */
+#define DEFAULT_SPEAKER_LOCATION 5
+
+#define DC_MAX_AUDIO_DESC_COUNT 16
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+
+union display_content_support {
+	unsigned int raw;
+	struct {
+		unsigned int valid_content_type :1;
+		unsigned int game_content :1;
+		unsigned int cinema_content :1;
+		unsigned int photo_content :1;
+		unsigned int graphics_content :1;
+		unsigned int reserved :27;
+	} bits;
+};
+
+struct dc_edid_caps {
+	/* sink identification */
+	uint16_t manufacturer_id;
+	uint16_t product_id;
+	uint32_t serial_number;
+	uint8_t manufacture_week;
+	uint8_t manufacture_year;
+	uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+
+	/* audio caps */
+	uint8_t speaker_flags;
+	uint32_t audio_mode_count;
+	struct dc_cea_audio_mode audio_modes[DC_MAX_AUDIO_DESC_COUNT];
+	uint32_t audio_latency;
+	uint32_t video_latency;
+
+	union display_content_support content_support;
+
+	uint8_t qs_bit;
+	uint8_t qy_bit;
+
+	/*HDMI 2.0 caps*/
+	bool lte_340mcsc_scramble;
+
+	bool edid_hdmi;
+};
+
+struct view {
+	uint32_t width;
+	uint32_t height;
+};
+
+struct dc_mode_flags {
+	/* note: part of refresh rate flag*/
+	uint32_t INTERLACE :1;
+	/* native display timing*/
+	uint32_t NATIVE :1;
+	/* preferred is the recommended mode, one per display */
+	uint32_t PREFERRED :1;
+	/* true if this mode should use reduced blanking timings
+	 *_not_ related to the Reduced Blanking adjustment*/
+	uint32_t REDUCED_BLANKING :1;
+	/* note: part of refreshrate flag*/
+	uint32_t VIDEO_OPTIMIZED_RATE :1;
+	/* should be reported to upper layers as mode_flags*/
+	uint32_t PACKED_PIXEL_FORMAT :1;
+	/*< preferred view*/
+	uint32_t PREFERRED_VIEW :1;
+	/* this timing should be used only in tiled mode*/
+	uint32_t TILED_MODE :1;
+	uint32_t DSE_MODE :1;
+	/* Refresh rate divider when Miracast sink is using a
+	 different rate than the output display device
+	 Must be zero for wired displays and non-zero for
+	 Miracast displays*/
+	uint32_t MIRACAST_REFRESH_DIVIDER;
+};
+
+
+enum dc_timing_source {
+	TIMING_SOURCE_UNDEFINED,
+
+	/* explicitly specified by user, most important*/
+	TIMING_SOURCE_USER_FORCED,
+	TIMING_SOURCE_USER_OVERRIDE,
+	TIMING_SOURCE_CUSTOM,
+	TIMING_SOURCE_EXPLICIT,
+
+	/* explicitly specified by the display device, more important*/
+	TIMING_SOURCE_EDID_CEA_SVD_3D,
+	TIMING_SOURCE_EDID_CEA_SVD_PREFERRED,
+	TIMING_SOURCE_EDID_CEA_SVD_420,
+	TIMING_SOURCE_EDID_DETAILED,
+	TIMING_SOURCE_EDID_ESTABLISHED,
+	TIMING_SOURCE_EDID_STANDARD,
+	TIMING_SOURCE_EDID_CEA_SVD,
+	TIMING_SOURCE_EDID_CVT_3BYTE,
+	TIMING_SOURCE_EDID_4BYTE,
+	TIMING_SOURCE_VBIOS,
+	TIMING_SOURCE_CV,
+	TIMING_SOURCE_TV,
+	TIMING_SOURCE_HDMI_VIC,
+
+	/* implicitly specified by display device, still safe but less important*/
+	TIMING_SOURCE_DEFAULT,
+
+	/* only used for custom base modes */
+	TIMING_SOURCE_CUSTOM_BASE,
+
+	/* these timing might not work, least important*/
+	TIMING_SOURCE_RANGELIMIT,
+	TIMING_SOURCE_OS_FORCED,
+	TIMING_SOURCE_IMPLICIT,
+
+	/* only used by default mode list*/
+	TIMING_SOURCE_BASICMODE,
+
+	TIMING_SOURCE_COUNT
+};
+
+
+struct stereo_3d_features {
+	bool supported			;
+	bool allTimings			;
+	bool cloneMode			;
+	bool scaling			;
+	bool singleFrameSWPacked;
+};
+
+enum dc_timing_support_method {
+	TIMING_SUPPORT_METHOD_UNDEFINED,
+	TIMING_SUPPORT_METHOD_EXPLICIT,
+	TIMING_SUPPORT_METHOD_IMPLICIT,
+	TIMING_SUPPORT_METHOD_NATIVE
+};
+
+struct dc_mode_info {
+	uint32_t pixel_width;
+	uint32_t pixel_height;
+	uint32_t field_rate;
+	/* Vertical refresh rate for progressive modes.
+	* Field rate for interlaced modes.*/
+
+	enum dc_timing_standard timing_standard;
+	enum dc_timing_source timing_source;
+	struct dc_mode_flags flags;
+};
+
+enum dc_power_state {
+	DC_POWER_STATE_ON = 1,
+	DC_POWER_STATE_STANDBY,
+	DC_POWER_STATE_SUSPEND,
+	DC_POWER_STATE_OFF
+};
+
+/* DC PowerStates */
+enum dc_video_power_state {
+	DC_VIDEO_POWER_UNSPECIFIED = 0,
+	DC_VIDEO_POWER_ON = 1,
+	DC_VIDEO_POWER_STANDBY,
+	DC_VIDEO_POWER_SUSPEND,
+	DC_VIDEO_POWER_OFF,
+	DC_VIDEO_POWER_HIBERNATE,
+	DC_VIDEO_POWER_SHUTDOWN,
+	DC_VIDEO_POWER_ULPS,	/* BACO or Ultra-Light-Power-State */
+	DC_VIDEO_POWER_AFTER_RESET,
+	DC_VIDEO_POWER_MAXIMUM
+};
+
+enum dc_acpi_cm_power_state {
+	DC_ACPI_CM_POWER_STATE_D0 = 1,
+	DC_ACPI_CM_POWER_STATE_D1 = 2,
+	DC_ACPI_CM_POWER_STATE_D2 = 4,
+	DC_ACPI_CM_POWER_STATE_D3 = 8
+};
+
+enum dc_connection_type {
+	dc_connection_none,
+	dc_connection_single,
+	dc_connection_mst_branch,
+	dc_connection_active_dongle
+};
+
+struct dc_csc_adjustments {
+	struct fixed31_32 contrast;
+	struct fixed31_32 saturation;
+	struct fixed31_32 brightness;
+	struct fixed31_32 hue;
+};
+
+enum {
+	MAX_LANES = 2,
+	MAX_COFUNC_PATH = 6,
+	LAYER_INDEX_PRIMARY = -1,
+};
+
+enum dpcd_downstream_port_max_bpc {
+	DOWN_STREAM_MAX_8BPC = 0,
+	DOWN_STREAM_MAX_10BPC,
+	DOWN_STREAM_MAX_12BPC,
+	DOWN_STREAM_MAX_16BPC
+};
+struct dc_dongle_caps {
+	/* dongle type (DP converter, CV smart dongle) */
+	enum display_dongle_type dongle_type;
+	bool extendedCapValid;
+	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+	indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
+	bool is_dp_hdmi_s3d_converter;
+	bool is_dp_hdmi_ycbcr422_pass_through;
+	bool is_dp_hdmi_ycbcr420_pass_through;
+	bool is_dp_hdmi_ycbcr422_converter;
+	bool is_dp_hdmi_ycbcr420_converter;
+	uint32_t dp_hdmi_max_bpc;
+	uint32_t dp_hdmi_max_pixel_clk;
+};
+/* Scaling format */
+enum scaling_transformation {
+	SCALING_TRANSFORMATION_UNINITIALIZED,
+	SCALING_TRANSFORMATION_IDENTITY = 0x0001,
+	SCALING_TRANSFORMATION_CENTER_TIMING = 0x0002,
+	SCALING_TRANSFORMATION_FULL_SCREEN_SCALE = 0x0004,
+	SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE = 0x0008,
+	SCALING_TRANSFORMATION_DAL_DECIDE = 0x0010,
+	SCALING_TRANSFORMATION_INVALID = 0x80000000,
+
+	/* Flag the first and last */
+	SCALING_TRANSFORMATION_BEGING = SCALING_TRANSFORMATION_IDENTITY,
+	SCALING_TRANSFORMATION_END =
+		SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE
+};
+
+enum display_content_type {
+	DISPLAY_CONTENT_TYPE_NO_DATA = 0,
+	DISPLAY_CONTENT_TYPE_GRAPHICS = 1,
+	DISPLAY_CONTENT_TYPE_PHOTO = 2,
+	DISPLAY_CONTENT_TYPE_CINEMA = 4,
+	DISPLAY_CONTENT_TYPE_GAME = 8
+};
+
+/* audio*/
+
+union audio_sample_rates {
+	struct sample_rates {
+		uint8_t RATE_32:1;
+		uint8_t RATE_44_1:1;
+		uint8_t RATE_48:1;
+		uint8_t RATE_88_2:1;
+		uint8_t RATE_96:1;
+		uint8_t RATE_176_4:1;
+		uint8_t RATE_192:1;
+	} rate;
+
+	uint8_t all;
+};
+
+struct audio_speaker_flags {
+	uint32_t FL_FR:1;
+	uint32_t LFE:1;
+	uint32_t FC:1;
+	uint32_t RL_RR:1;
+	uint32_t RC:1;
+	uint32_t FLC_FRC:1;
+	uint32_t RLC_RRC:1;
+	uint32_t SUPPORT_AI:1;
+};
+
+struct audio_speaker_info {
+	uint32_t ALLSPEAKERS:7;
+	uint32_t SUPPORT_AI:1;
+};
+
+
+struct audio_info_flags {
+
+	union {
+
+		struct audio_speaker_flags speaker_flags;
+		struct audio_speaker_info   info;
+
+		uint8_t all;
+	};
+};
+
+enum audio_format_code {
+	AUDIO_FORMAT_CODE_FIRST = 1,
+	AUDIO_FORMAT_CODE_LINEARPCM = AUDIO_FORMAT_CODE_FIRST,
+
+	AUDIO_FORMAT_CODE_AC3,
+	/*Layers 1 & 2 */
+	AUDIO_FORMAT_CODE_MPEG1,
+	/*MPEG1 Layer 3 */
+	AUDIO_FORMAT_CODE_MP3,
+	/*multichannel */
+	AUDIO_FORMAT_CODE_MPEG2,
+	AUDIO_FORMAT_CODE_AAC,
+	AUDIO_FORMAT_CODE_DTS,
+	AUDIO_FORMAT_CODE_ATRAC,
+	AUDIO_FORMAT_CODE_1BITAUDIO,
+	AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS,
+	AUDIO_FORMAT_CODE_DTS_HD,
+	AUDIO_FORMAT_CODE_MAT_MLP,
+	AUDIO_FORMAT_CODE_DST,
+	AUDIO_FORMAT_CODE_WMAPRO,
+	AUDIO_FORMAT_CODE_LAST,
+	AUDIO_FORMAT_CODE_COUNT =
+		AUDIO_FORMAT_CODE_LAST - AUDIO_FORMAT_CODE_FIRST
+};
+
+struct audio_mode {
+	 /* ucData[0] [6:3] */
+	enum audio_format_code format_code;
+	/* ucData[0] [2:0] */
+	uint8_t channel_count;
+	/* ucData[1] */
+	union audio_sample_rates sample_rates;
+	union {
+		/* for LPCM */
+		uint8_t sample_size;
+		/* for Audio Formats 2-8 (Max bit rate divided by 8 kHz) */
+		uint8_t max_bit_rate;
+		/* for Audio Formats 9-15 */
+		uint8_t vendor_specific;
+	};
+};
+
+struct audio_info {
+	struct audio_info_flags flags;
+	uint32_t video_latency;
+	uint32_t audio_latency;
+	uint32_t display_index;
+	uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+	uint32_t manufacture_id;
+	uint32_t product_id;
+	/* PortID used for ContainerID when defined */
+	uint32_t port_id[2];
+	uint32_t mode_count;
+	/* this field must be last in this struct */
+	struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
+};
+
+struct freesync_context {
+	bool supported;
+	bool enabled;
+	bool active;
+
+	unsigned int min_refresh_in_micro_hz;
+	unsigned int nominal_refresh_in_micro_hz;
+};
+
+struct psr_config {
+	unsigned char psr_version;
+	unsigned int psr_rfb_setup_time;
+	bool psr_exit_link_training_required;
+
+	bool psr_frame_capture_indication_req;
+	unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
+union dmcu_psr_level {
+	struct {
+		unsigned int SKIP_CRC:1;
+		unsigned int SKIP_DP_VID_STREAM_DISABLE:1;
+		unsigned int SKIP_PHY_POWER_DOWN:1;
+		unsigned int SKIP_AUX_ACK_CHECK:1;
+		unsigned int SKIP_CRTC_DISABLE:1;
+		unsigned int SKIP_AUX_RFB_CAPTURE_CHECK:1;
+		unsigned int SKIP_SMU_NOTIFICATION:1;
+		unsigned int SKIP_AUTO_STATE_ADVANCE:1;
+		unsigned int DISABLE_PSR_ENTRY_ABORT:1;
+		unsigned int SKIP_SINGLE_OTG_DISABLE:1;
+		unsigned int RESERVED:22;
+	} bits;
+	unsigned int u32all;
+};
+
+enum physical_phy_id {
+	PHYLD_0,
+	PHYLD_1,
+	PHYLD_2,
+	PHYLD_3,
+	PHYLD_4,
+	PHYLD_5,
+	PHYLD_6,
+	PHYLD_7,
+	PHYLD_8,
+	PHYLD_9,
+	PHYLD_COUNT,
+	PHYLD_UNKNOWN = (-1L)
+};
+
+enum phy_type {
+	PHY_TYPE_UNKNOWN  = 1,
+	PHY_TYPE_PCIE_PHY = 2,
+	PHY_TYPE_UNIPHY = 3,
+};
+
+struct psr_context {
+	/* ddc line */
+	enum channel_id channel;
+	/* Transmitter id */
+	enum transmitter transmitterId;
+	/* Engine Id is used for Dig Be source select */
+	enum engine_id engineId;
+	/* Controller Id used for Dig Fe source select */
+	enum controller_id controllerId;
+	/* Pcie or Uniphy */
+	enum phy_type phyType;
+	/* Physical PHY Id used by SMU interpretation */
+	enum physical_phy_id smuPhyId;
+	/* Vertical total pixels from crtc timing.
+	 * This is used for static screen detection.
+	 * ie. If we want to detect half a frame,
+	 * we use this to determine the hyst lines.
+	 */
+	unsigned int crtcTimingVerticalTotal;
+	/* PSR supported from panel capabilities and
+	 * current display configuration
+	 */
+	bool psrSupportedDisplayConfig;
+	/* Whether fast link training is supported by the panel */
+	bool psrExitLinkTrainingRequired;
+	/* If RFB setup time is greater than the total VBLANK time,
+	 * it is not possible for the sink to capture the video frame
+	 * in the same frame the SDP is sent. In this case,
+	 * the frame capture indication bit should be set and an extra
+	 * static frame should be transmitted to the sink.
+	 */
+	bool psrFrameCaptureIndicationReq;
+	/* Set the last possible line SDP may be transmitted without violating
+	 * the RFB setup time or entering the active video frame.
+	 */
+	unsigned int sdpTransmitLineNumDeadline;
+	/* The VSync rate in Hz used to calculate the
+	 * step size for smooth brightness feature
+	 */
+	unsigned int vsyncRateHz;
+	unsigned int skipPsrWaitForPllLock;
+	unsigned int numberOfControllers;
+	/* Unused, for future use. To indicate that first changed frame from
+	 * state3 shouldn't result in psr_inactive, but rather to perform
+	 * an automatic single frame rfb_update.
+	 */
+	bool rfb_update_auto_en;
+	/* Number of frame before entering static screen */
+	unsigned int timehyst_frames;
+	/* Partial frames before entering static screen */
+	unsigned int hyst_lines;
+	/* # of repeated AUX transaction attempts to make before
+	 * indicating failure to the driver
+	 */
+	unsigned int aux_repeats;
+	/* Controls hw blocks to power down during PSR active state */
+	union dmcu_psr_level psr_level;
+	/* Controls additional delay after remote frame capture before
+	 * continuing power down
+	 */
+	unsigned int frame_delay;
+};
+
+struct colorspace_transform {
+	struct fixed31_32 matrix[12];
+	bool enable_remap;
+};
+
+struct csc_transform {
+	uint16_t matrix[12];
+	bool enable_adjustment;
+};
+
+enum i2c_mot_mode {
+	I2C_MOT_UNDEF,
+	I2C_MOT_TRUE,
+	I2C_MOT_FALSE
+};
+
+#endif /* DC_TYPES_H_ */

+ 15 - 0
drivers/gpu/drm/amd/display/dc/dce/Makefile

@@ -0,0 +1,15 @@
+#
+# Makefile for common 'dce' logic
+# HW object files under this folder follow a similar pattern for HW programming
+#   - register offset and/or shift + mask stored in the dce_hw struct
+#   - register programming through common macros that look up register
+#     offset/shift/mask stored in dce_hw struct
+
+DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+
+
+AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE)

+ 485 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c

@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_abm.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#include "atom.h"
+
+
+#define TO_DCE_ABM(abm)\
+	container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+	(abm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name
+
+#define CTX \
+	abm_dce->base.ctx
+
+#define MCP_ABM_LEVEL_SET 0x65
+#define MCP_ABM_PIPE_SET 0x66
+#define MCP_BL_SET 0x67
+
+#define MCP_DISABLE_ABM_IMMEDIATELY 255
+
+struct abm_backlight_registers {
+	unsigned int BL_PWM_CNTL;
+	unsigned int BL_PWM_CNTL2;
+	unsigned int BL_PWM_PERIOD_CNTL;
+	unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+/* Cached backlight PWM register values, saved and restored by
+ * dce_abm_init_backlight() to work around VBIOS zeroing them.
+ * NOTE(review): file-scope static shared by every ABM instance -
+ * confirm this is safe if more than one dce_abm can be created.
+ */
+static struct abm_backlight_registers stored_backlight_registers = {0};
+
+
+/* Derive the current 16-bit backlight level from the BL PWM registers.
+ *
+ * Reads BL_PWM_PERIOD_CNTL (period + integer bit count) and BL_PWM_CNTL
+ * (active duty cycle + fractional-mode enable), then rescales the duty
+ * cycle against the period, with round-to-nearest, into a 16-bit value.
+ */
+static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+{
+	uint64_t current_backlight;
+	uint32_t round_result;
+	uint32_t pwm_period_cntl, bl_period, bl_int_count;
+	uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+	uint32_t bl_period_mask, bl_pwm_mask;
+
+	/* NOTE(review): pwm_period_cntl and bl_pwm_cntl are read here but
+	 * never used below - possibly only to latch the registers; confirm.
+	 */
+	pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+	REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+	REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+	bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+	REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+	REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+	/* BITCNT of 0 means the full 16 bits are in use */
+	if (bl_int_count == 0)
+		bl_int_count = 16;
+
+	bl_period_mask = (1 << bl_int_count) - 1;
+	bl_period &= bl_period_mask;
+
+	bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+	if (fractional_duty_cycle_en == 0)
+		bl_pwm &= bl_pwm_mask;
+	else
+		bl_pwm &= 0xFFFF;
+
+	current_backlight = bl_pwm << (1 + bl_int_count);
+
+	if (bl_period == 0)
+		bl_period = 0xFFFF;
+
+	/* duty / period, rounded to nearest */
+	current_backlight = div_u64(current_backlight, bl_period);
+	current_backlight = (current_backlight + 1) >> 1;
+
+	current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+	round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+	round_result = (round_result >> (bl_int_count-1)) & 1;
+
+	current_backlight >>= bl_int_count;
+	current_backlight += round_result;
+
+	return (uint32_t)(current_backlight);
+}
+
+/* Program an 8-bit backlight level directly into the BL PWM registers
+ * (used when the DMCU firmware is not running, see
+ * dce_abm_set_backlight_level()).  Converts the level to U1.16 format,
+ * scales it by the PWM period, and writes the new active duty cycle
+ * under the group-1 register lock.
+ */
+static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+{
+	uint32_t backlight_24bit;
+	uint32_t backlight_17bit;
+	uint32_t backlight_16bit;
+	uint32_t masked_pwm_period;
+	uint8_t rounding_bit;
+	uint8_t bit_count;
+	uint64_t active_duty_cycle;
+	uint32_t pwm_period_bitcnt;
+
+	/*
+	 * 1. Convert 8-bit value to 17 bit U1.16 format
+	 * (1 integer, 16 fractional bits)
+	 */
+
+	/* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
+	 * effectively multiplying value by 256/255
+	 * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
+	 */
+	backlight_24bit = level * 0x10101;
+
+	/* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
+	 * used for rounding, take most significant bit of fraction for
+	 * rounding, e.g. for 0xEFEFEF, rounding bit is 1
+	 */
+	rounding_bit = (backlight_24bit >> 7) & 1;
+
+	/* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
+	 * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
+	 */
+	backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
+
+	/*
+	 * 2. Find  16 bit backlight active duty cycle, where 0 <= backlight
+	 * active duty cycle <= backlight period
+	 */
+
+	/* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+	 */
+	REG_GET_2(BL_PWM_PERIOD_CNTL,
+			BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+			BL_PWM_PERIOD, &masked_pwm_period);
+
+	if (pwm_period_bitcnt == 0)
+		bit_count = 16;
+	else
+		bit_count = pwm_period_bitcnt;
+
+	/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+	masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+	/* 2.2 Calculate integer active duty cycle required upper 16 bits
+	 * contain integer component, lower 16 bits contain fractional component
+	 * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+	 */
+	active_duty_cycle = backlight_17bit * masked_pwm_period;
+
+	/* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+	 * components shift by bitCount then mask 16 bits and add rounding bit
+	 * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
+	 */
+	backlight_16bit = active_duty_cycle >> bit_count;
+	backlight_16bit &= 0xFFFF;
+	backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+	/*
+	 * 3. Program register with updated value
+	 */
+
+	/* 3.1 Lock group 2 backlight registers */
+
+	REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+			BL_PWM_GRP1_REG_LOCK, 1);
+
+	/* 3.2 Write new active duty cycle */
+	REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+	/* 3.3 Unlock group 2 backlight registers */
+	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_REG_LOCK, 0);
+
+	/* 3.4 Wait for pending bit to be cleared */
+	REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+			1, 10000);
+}
+
+/* Program the backlight level through the DMCU firmware via the
+ * master-comm mailbox: select the target pipe (MCP_ABM_PIPE_SET), write
+ * the 17-bit user level, issue MCP_BL_SET with the frame-ramp value,
+ * then mirror the 8-bit level into BIOS scratch 2 for the VBIOS.
+ */
+static void dmcu_set_backlight_level(
+	struct dce_abm *abm_dce,
+	uint32_t level,
+	uint32_t frame_ramp,
+	uint32_t controller_id)
+{
+	/* 8-bit -> 16-bit expansion: (level * 0x10101) >> 8 */
+	unsigned int backlight_16_bit = (level * 0x10101) >> 8;
+	/* NOTE(review): rounding here tests bit 7 of the 16-bit value,
+	 * while driver_set_backlight_level() rounds on the MSB of the
+	 * discarded low byte - confirm bit 7 is the intended rounding bit.
+	 */
+	unsigned int backlight_17_bit = backlight_16_bit +
+				(((backlight_16_bit & 0x80) >> 7) & 1);
+	uint32_t rampingBoundary = 0xFFFF;
+	uint32_t s2;
+
+	/* set ramping boundary */
+	REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+	/* setDMCUParam_Pipe */
+	REG_UPDATE_2(MASTER_COMM_CMD_REG,
+			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+			MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+			0, 1, 80000);
+
+	/* setDMCUParam_BL */
+	REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+
+	/* write ramp (no ramp when targeting controller 0) */
+	if (controller_id == 0)
+		frame_ramp = 0;
+	REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp);
+
+	/* setDMCUParam_Cmd */
+	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* UpdateRequestedBacklightLevel */
+	s2 = REG_READ(BIOS_SCRATCH_2);
+
+	s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+				ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+	s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+/* One-time ABM hardware init: program histogram/light-sensor sample
+ * rates and histogram/IPCSC configuration, then seed the current,
+ * target and user backlight levels from the live PWM duty cycle.
+ */
+static void dce_abm_init(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+	unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+
+	/* NOTE(review): double writes (0x103 then 0x101) look deliberate -
+	 * confirm against the register programming sequence.
+	 */
+	REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+	REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+	REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+	REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+	REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+	REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+			ABM1_HG_NUM_OF_BINS_SEL, 0,
+			ABM1_HG_VMAX_SEL, 1,
+			ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+	REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+			ABM1_IPCSC_COEFF_SEL_R, 2,
+			ABM1_IPCSC_COEFF_SEL_G, 4,
+			ABM1_IPCSC_COEFF_SEL_B, 2);
+
+	REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+			BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+	REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+			BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+	REG_UPDATE(BL1_PWM_USER_LEVEL,
+			BL1_PWM_USER_LEVEL, backlight);
+
+	REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+			ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+			ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+	REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+			ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+			ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+}
+
+/* Return the current ABM-adjusted backlight as an 8-bit value by
+ * dropping the low 8 bits of BL1_PWM_CURRENT_ABM_LEVEL (assumes the
+ * register holds a 16-bit level - confirm against register spec).
+ */
+static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+	unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+	return (backlight >> 8);
+}
+
+/* Request ABM aggressiveness 'level' from the DMCU firmware: wait for
+ * the master-comm mailbox to be free, then issue MCP_ABM_LEVEL_SET.
+ * Always returns true.
+ */
+static bool dce_abm_set_level(struct abm *abm, uint32_t level)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+			1, 80000);
+
+	/* setDMCUParam_ABMLevel */
+	REG_UPDATE_2(MASTER_COMM_CMD_REG,
+			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+			MASTER_COMM_CMD_REG_BYTE2, level);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	return true;
+}
+
+/* Disable ABM at once by sending level MCP_DISABLE_ABM_IMMEDIATELY
+ * (255) to the DMCU firmware.  Always returns true.
+ */
+static bool dce_abm_immediate_disable(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+			1, 80000);
+
+	/* setDMCUParam_ABMLevel */
+	REG_UPDATE_2(MASTER_COMM_CMD_REG,
+			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+			MASTER_COMM_CMD_REG_BYTE2, MCP_DISABLE_ABM_IMMEDIATELY);
+
+	/* notifyDMCUMsg */
+	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	return true;
+}
+
+/* Initialize backlight PWM control and hand backlight ownership to the
+ * driver.  Works around a VBIOS bug where the BL PWM registers read
+ * back 0/1 after boot: in that case restore the values cached in
+ * stored_backlight_registers (or hard-coded defaults when no cache
+ * exists yet), otherwise refresh the cache from the live registers.
+ * Always returns true.
+ */
+static bool dce_abm_init_backlight(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+	uint32_t value;
+
+	/* It must not be 0, so we have to restore them
+	 * Bios bug w/a - period resets to zero,
+	 * restoring to cache values which is always correct
+	 */
+	REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+	if (value == 0 || value == 1) {
+		if (stored_backlight_registers.BL_PWM_CNTL != 0) {
+			REG_WRITE(BL_PWM_CNTL,
+				stored_backlight_registers.BL_PWM_CNTL);
+			REG_WRITE(BL_PWM_CNTL2,
+				stored_backlight_registers.BL_PWM_CNTL2);
+			REG_WRITE(BL_PWM_PERIOD_CNTL,
+				stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+			REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
+				BL_PWM_REF_DIV,
+				stored_backlight_registers.
+				LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+		} else {
+			/* TODO: Note: This should not really happen since VBIOS
+			 * should have initialized PWM registers on boot.
+			 */
+			REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+			REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+		}
+	} else {
+		stored_backlight_registers.BL_PWM_CNTL =
+				REG_READ(BL_PWM_CNTL);
+		stored_backlight_registers.BL_PWM_CNTL2 =
+				REG_READ(BL_PWM_CNTL2);
+		stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+				REG_READ(BL_PWM_PERIOD_CNTL);
+
+		REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+				&stored_backlight_registers.
+				LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+	}
+
+	/* Have driver take backlight control
+	 * TakeBacklightControl(true)
+	 */
+	value = REG_READ(BIOS_SCRATCH_2);
+	value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+	REG_WRITE(BIOS_SCRATCH_2, value);
+
+	/* Enable the backlight output */
+	REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+	/* Unlock group 2 backlight registers */
+	REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+			BL_PWM_GRP1_REG_LOCK, 0);
+
+	return true;
+}
+
+/* DMCU firmware is considered initialized once its microcontroller is
+ * out of reset (DMCU_STATUS.UC_IN_RESET reads 0).
+ */
+static bool is_dmcu_initialized(struct abm *abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+	unsigned int dmcu_uc_reset;
+
+	REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+	return !dmcu_uc_reset;
+}
+
+/* Set the backlight to 'backlight_level' (8-bit): use the DMCU
+ * firmware path when the DMCU is running, otherwise program the PWM
+ * registers directly from the driver.  Always returns true.
+ */
+static bool dce_abm_set_backlight_level(
+		struct abm *abm,
+		unsigned int backlight_level,
+		unsigned int frame_ramp,
+		unsigned int controller_id)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+	dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT,
+			"New Backlight level: %d (0x%X)\n",
+			backlight_level, backlight_level);
+
+	/* If DMCU is in reset state, DMCU is uninitialized */
+	if (is_dmcu_initialized(abm))
+		dmcu_set_backlight_level(abm_dce,
+				backlight_level,
+				frame_ramp,
+				controller_id);
+	else
+		driver_set_backlight_level(abm_dce, backlight_level);
+
+	return true;
+}
+
+/* ABM function table shared by every DCE ABM instance */
+static const struct abm_funcs dce_funcs = {
+	.abm_init = dce_abm_init,
+	.set_abm_level = dce_abm_set_level,
+	.init_backlight = dce_abm_init_backlight,
+	.set_backlight_level = dce_abm_set_backlight_level,
+	.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+	.set_abm_immediate_disable = dce_abm_immediate_disable,
+	.is_dmcu_initialized = is_dmcu_initialized
+};
+
+/* Initialize the dce_abm wrapper: base context and vtable, plus the
+ * ASIC-specific register offset and shift/mask tables.
+ */
+static void dce_abm_construct(
+	struct dce_abm *abm_dce,
+	struct dc_context *ctx,
+	const struct dce_abm_registers *regs,
+	const struct dce_abm_shift *abm_shift,
+	const struct dce_abm_mask *abm_mask)
+{
+	struct abm *base = &abm_dce->base;
+
+	base->ctx = ctx;
+	base->funcs = &dce_funcs;
+
+	abm_dce->regs = regs;
+	abm_dce->abm_shift = abm_shift;
+	abm_dce->abm_mask = abm_mask;
+}
+
+/* Allocate and initialize a DCE ABM instance; returns NULL on
+ * allocation failure.  The caller owns the result and releases it
+ * with dce_abm_destroy().
+ */
+struct abm *dce_abm_create(
+	struct dc_context *ctx,
+	const struct dce_abm_registers *regs,
+	const struct dce_abm_shift *abm_shift,
+	const struct dce_abm_mask *abm_mask)
+{
+	struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+	if (abm_dce == NULL) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dce_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+	/* NOTE(review): redundant - dce_abm_construct() already set funcs */
+	abm_dce->base.funcs = &dce_funcs;
+
+	return &abm_dce->base;
+}
+
+/* Free an ABM created by dce_abm_create() and NULL the caller's pointer. */
+void dce_abm_destroy(struct abm **abm)
+{
+	struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+
+	kfree(abm_dce);
+	*abm = NULL;
+}

+ 228 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_abm.h

@@ -0,0 +1,228 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_ABM_H_
+#define _DCE_ABM_H_
+
+#include "abm.h"
+
+/* Register-list macros below expand via the SR/SRI/NBIO_SR helpers (defined
+ * by the including resource file) to fill struct dce_abm_registers with the
+ * per-ASIC register offsets. */
+
+/* Registers shared by every DCE generation: backlight PWM, DMCU mailbox
+ * and DMCU status. */
+#define ABM_COMMON_REG_LIST_DCE_BASE() \
+	SR(BL_PWM_PERIOD_CNTL), \
+	SR(BL_PWM_CNTL), \
+	SR(BL_PWM_CNTL2), \
+	SR(BL_PWM_GRP1_REG_LOCK), \
+	SR(LVTMA_PWRSEQ_REF_DIV), \
+	SR(MASTER_COMM_CNTL_REG), \
+	SR(MASTER_COMM_CMD_REG), \
+	SR(MASTER_COMM_DATA_REG1), \
+	SR(DMCU_STATUS)
+
+/* DCE11x layout: ABM registers are single-instance (plain SR). */
+#define ABM_DCE110_COMMON_REG_LIST() \
+	ABM_COMMON_REG_LIST_DCE_BASE(), \
+	SR(DC_ABM1_HG_SAMPLE_RATE), \
+	SR(DC_ABM1_LS_SAMPLE_RATE), \
+	SR(BL1_PWM_BL_UPDATE_SAMPLE_RATE), \
+	SR(DC_ABM1_HG_MISC_CTRL), \
+	SR(DC_ABM1_IPCSC_COEFF_SEL), \
+	SR(BL1_PWM_CURRENT_ABM_LEVEL), \
+	SR(BL1_PWM_TARGET_ABM_LEVEL), \
+	SR(BL1_PWM_USER_LEVEL), \
+	SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
+	SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
+	SR(BIOS_SCRATCH_2)
+
+/* DCN10 layout: ABM registers are per-instance (SRI with ABM block id);
+ * BIOS scratch lives behind NBIO. */
+#define ABM_DCN10_REG_LIST(id)\
+	ABM_COMMON_REG_LIST_DCE_BASE(), \
+	SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+	SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+	SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+	SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+	SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+	SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+	SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+	SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+	SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+	SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+	NBIO_SR(BIOS_SCRATCH_2)
+
+/* Build one shift/mask initializer: maps a field to the autogenerated
+ * <reg>__<field><suffix> constant (suffix is __SHIFT or _MASK). */
+#define ABM_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+	ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+	ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+	ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+	ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+	ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+	ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+	ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+	ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
+	ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+	ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+	ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+	ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
+	ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
+	ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
+	ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+	ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+			ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+	ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+			ABM1_HG_VMAX_SEL, mask_sh), \
+	ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+			ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+	ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+			ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+	ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+			ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+	ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+			ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+	ABM_SF(BL1_PWM_CURRENT_ABM_LEVEL, \
+			BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+	ABM_SF(BL1_PWM_TARGET_ABM_LEVEL, \
+			BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+	ABM_SF(BL1_PWM_USER_LEVEL, \
+			BL1_PWM_USER_LEVEL, mask_sh), \
+	ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+			ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+	ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+			ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+	ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+			ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+	ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+			ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+	ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+/* Same fields as DCE110 but the DCN10 headers prefix register names with
+ * the ABM0_ instance. */
+#define ABM_MASK_SH_LIST_DCN10(mask_sh) \
+	ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+			ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+			ABM1_HG_VMAX_SEL, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+			ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+			ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+			ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+			ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+	ABM_SF(ABM0_BL1_PWM_CURRENT_ABM_LEVEL, \
+			BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+	ABM_SF(ABM0_BL1_PWM_TARGET_ABM_LEVEL, \
+			BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+	ABM_SF(ABM0_BL1_PWM_USER_LEVEL, \
+			BL1_PWM_USER_LEVEL, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+			ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+			ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+			ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+			ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+	ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+			ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+/* One entry per programmable field; instantiated twice below with uint8_t
+ * (shift table) and uint32_t (mask table). */
+#define ABM_REG_FIELD_LIST(type) \
+	type ABM1_HG_NUM_OF_BINS_SEL; \
+	type ABM1_HG_VMAX_SEL; \
+	type ABM1_HG_BIN_BITWIDTH_SIZE_SEL; \
+	type ABM1_IPCSC_COEFF_SEL_R; \
+	type ABM1_IPCSC_COEFF_SEL_G; \
+	type ABM1_IPCSC_COEFF_SEL_B; \
+	type BL1_PWM_CURRENT_ABM_LEVEL; \
+	type BL1_PWM_TARGET_ABM_LEVEL; \
+	type BL1_PWM_USER_LEVEL; \
+	type ABM1_LS_MIN_PIXEL_VALUE_THRES; \
+	type ABM1_LS_MAX_PIXEL_VALUE_THRES; \
+	type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
+	type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
+	type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
+	type BL_PWM_PERIOD; \
+	type BL_PWM_PERIOD_BITCNT; \
+	type BL_ACTIVE_INT_FRAC_CNT; \
+	type BL_PWM_FRACTIONAL_EN; \
+	type MASTER_COMM_INTERRUPT; \
+	type MASTER_COMM_CMD_REG_BYTE0; \
+	type MASTER_COMM_CMD_REG_BYTE1; \
+	type MASTER_COMM_CMD_REG_BYTE2; \
+	type BL_PWM_REF_DIV; \
+	type BL_PWM_EN; \
+	type UC_IN_RESET; \
+	type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+	type BL_PWM_GRP1_REG_LOCK; \
+	type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+/* Per-field bit shifts for the registers above. */
+struct dce_abm_shift {
+	ABM_REG_FIELD_LIST(uint8_t);
+};
+
+/* Per-field bit masks for the registers above. */
+struct dce_abm_mask {
+	ABM_REG_FIELD_LIST(uint32_t);
+};
+
+/* Register offsets, filled in by the ABM_*_REG_LIST macros. */
+struct dce_abm_registers {
+	uint32_t BL_PWM_PERIOD_CNTL;
+	uint32_t BL_PWM_CNTL;
+	uint32_t BL_PWM_CNTL2;
+	uint32_t LVTMA_PWRSEQ_REF_DIV;
+	uint32_t DC_ABM1_HG_SAMPLE_RATE;
+	uint32_t DC_ABM1_LS_SAMPLE_RATE;
+	uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
+	uint32_t DC_ABM1_HG_MISC_CTRL;
+	uint32_t DC_ABM1_IPCSC_COEFF_SEL;
+	uint32_t BL1_PWM_CURRENT_ABM_LEVEL;
+	uint32_t BL1_PWM_TARGET_ABM_LEVEL;
+	uint32_t BL1_PWM_USER_LEVEL;
+	uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
+	uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
+	uint32_t MASTER_COMM_CNTL_REG;
+	uint32_t MASTER_COMM_CMD_REG;
+	uint32_t MASTER_COMM_DATA_REG1;
+	uint32_t BIOS_SCRATCH_2;
+	uint32_t DMCU_STATUS;
+	uint32_t BL_PWM_GRP1_REG_LOCK;
+};
+
+/* Concrete DCE ABM object: generic base plus per-ASIC register tables. */
+struct dce_abm {
+	struct abm base;
+	const struct dce_abm_registers *regs;
+	const struct dce_abm_shift *abm_shift;
+	const struct dce_abm_mask *abm_mask;
+};
+
+struct abm *dce_abm_create(
+	struct dc_context *ctx,
+	const struct dce_abm_registers *regs,
+	const struct dce_abm_shift *abm_shift,
+	const struct dce_abm_mask *abm_mask);
+
+void dce_abm_destroy(struct abm **abm);
+
+#endif /* _DCE_ABM_H_ */

+ 945 - 0
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c

@@ -0,0 +1,945 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dce_audio.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* Recover the dce_audio wrapper from the embedded generic audio base. */
+#define DCE_AUD(audio)\
+	container_of(audio, struct dce_audio, base)
+
+/* CTX / REG / FN are the conventional hooks consumed by reg_helper.h
+ * (REG_SET/REG_READ/REG_UPDATE) -- they must resolve to a local 'aud'. */
+#define CTX \
+	aud->base.ctx
+#define REG(reg)\
+	(aud->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	aud->shifts->field_name, aud->masks->field_name
+
+/* Azalia endpoint registers are indirect; their indices are the ix-prefixed
+ * constants from the register headers. */
+#define IX_REG(reg)\
+	ix ## reg
+
+#define AZ_REG_READ(reg_name) \
+		read_indirect_azalia_reg(audio, IX_REG(reg_name))
+
+#define AZ_REG_WRITE(reg_name, value) \
+		write_indirect_azalia_reg(audio, IX_REG(reg_name), value)
+
+/* Write one indirect Azalia endpoint register: latch the index into
+ * ENDPOINT_INDEX, then the payload into ENDPOINT_DATA.  The index->data
+ * ordering is required by the indirect access scheme. */
+static void write_indirect_azalia_reg(struct audio *audio,
+	uint32_t reg_index,
+	uint32_t reg_data)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	/* AZALIA_F0_CODEC_ENDPOINT_INDEX  endpoint index  */
+	REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+			AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+	/* AZALIA_F0_CODEC_ENDPOINT_DATA  endpoint data  */
+	REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
+			AZALIA_ENDPOINT_REG_DATA, reg_data);
+
+	dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+		"AUDIO:write_indirect_azalia_reg: index: %u  data: %u\n",
+		reg_index, reg_data);
+}
+
+/* Read one indirect Azalia endpoint register: latch the index, then read
+ * the data port.  Returns the raw register value. */
+static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	uint32_t value = 0;
+
+	/* AZALIA_F0_CODEC_ENDPOINT_INDEX  endpoint index  */
+	REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+			AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+	/* AZALIA_F0_CODEC_ENDPOINT_DATA  endpoint data  */
+	value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
+
+	dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+		"AUDIO:read_indirect_azalia_reg: index: %u  data: %u\n",
+		reg_index, value);
+
+	return value;
+}
+
+/* Scan audio_info->modes for the given format code.
+ *
+ * Returns true when at least one matching mode exists; when it does and
+ * format_index is non-NULL, stores the index of the matching mode with the
+ * highest channel count (ties keep the earliest).  Returns false for a
+ * NULL audio_info.  (Note: "channe" in the local name is a pre-existing
+ * typo for "channel".)
+ */
+static bool is_audio_format_supported(
+	const struct audio_info *audio_info,
+	enum audio_format_code audio_format_code,
+	uint32_t *format_index)
+{
+	uint32_t index;
+	uint32_t max_channe_index = 0;
+	bool found = false;
+
+	if (audio_info == NULL)
+		return found;
+
+	/* pass through whole array */
+	for (index = 0; index < audio_info->mode_count; index++) {
+		if (audio_info->modes[index].format_code == audio_format_code) {
+			if (found) {
+				/* format has multiple entries, choose the one
+				 * with the highest number of channels */
+				if (audio_info->modes[index].channel_count >
+		audio_info->modes[max_channe_index].channel_count) {
+					max_channe_index = index;
+				}
+			} else {
+				/* format found, save its index */
+				found = true;
+				max_channe_index = index;
+			}
+		}
+	}
+
+	/* return index */
+	if (found && format_index != NULL)
+		*format_index = max_channe_index;
+
+	return found;
+}
+
+/*For HDMI, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_hdmi(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	union audio_sample_rates *sample_rates)
+{
+	uint32_t samples;
+	uint32_t  h_blank;
+	bool limit_freq_to_48_khz = false;
+	bool limit_freq_to_88_2_khz = false;
+	bool limit_freq_to_96_khz = false;
+	bool limit_freq_to_174_4_khz = false;
+
+	/* For two channels supported return whatever sink support,unmodified*/
+	if (channel_count > 2) {
+
+		/* Based on HDMI spec 1.3 Table 7.5 */
+		if ((crtc_info->requested_pixel_clock <= 27000) &&
+		(crtc_info->v_active <= 576) &&
+		!(crtc_info->interlaced) &&
+		!(crtc_info->pixel_repetition == 2 ||
+		crtc_info->pixel_repetition == 4)) {
+			limit_freq_to_48_khz = true;
+
+		} else if ((crtc_info->requested_pixel_clock <= 27000) &&
+				(crtc_info->v_active <= 576) &&
+				(crtc_info->interlaced) &&
+				(crtc_info->pixel_repetition == 2)) {
+			limit_freq_to_88_2_khz = true;
+
+		} else if ((crtc_info->requested_pixel_clock <= 54000) &&
+				(crtc_info->v_active <= 576) &&
+				!(crtc_info->interlaced)) {
+			limit_freq_to_174_4_khz = true;
+		}
+	}
+
+	/* Also do some calculation for the available Audio Bandwidth for the
+	 * 8 ch (i.e. for the Layout 1 => ch > 2)
+	 */
+	h_blank = crtc_info->h_total - crtc_info->h_active;
+
+	if (crtc_info->pixel_repetition)
+		h_blank *= crtc_info->pixel_repetition;
+
+	/*based on HDMI spec 1.3 Table 7.5 */
+	h_blank -= 58;
+	/*for Control Period */
+	h_blank -= 16;
+
+	samples = h_blank * 10;
+	/* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number
+	 * of Audio samples per line multiplied by 10 - Layout 1)
+	 */
+	samples /= 32;
+	samples *= crtc_info->v_active;
+	/*Number of samples multiplied by 10, per second */
+	samples *= crtc_info->refresh_rate;
+	/*Number of Audio samples per second */
+	samples /= 10;
+
+	/* @todo do it after deep color is implemented
+	 * 8xx - deep color bandwidth scaling
+	 * Extra bandwidth is avaliable in deep color b/c link runs faster than
+	 * pixel rate. This has the effect of allowing more tmds characters to
+	 * be transmitted during blank
+	 */
+
+	switch (crtc_info->color_depth) {
+	case COLOR_DEPTH_888:
+		samples *= 4;
+		break;
+	case COLOR_DEPTH_101010:
+		samples *= 5;
+		break;
+	case COLOR_DEPTH_121212:
+		samples *= 6;
+		break;
+	default:
+		samples *= 4;
+		break;
+	}
+
+	samples /= 4;
+
+	/*check limitation*/
+	if (samples < 88200)
+		limit_freq_to_48_khz = true;
+	else if (samples < 96000)
+		limit_freq_to_88_2_khz = true;
+	else if (samples < 176400)
+		limit_freq_to_96_khz = true;
+	else if (samples < 192000)
+		limit_freq_to_174_4_khz = true;
+
+	if (sample_rates != NULL) {
+		/* limit frequencies */
+		if (limit_freq_to_174_4_khz)
+			sample_rates->rate.RATE_192 = 0;
+
+		if (limit_freq_to_96_khz) {
+			sample_rates->rate.RATE_192 = 0;
+			sample_rates->rate.RATE_176_4 = 0;
+		}
+		if (limit_freq_to_88_2_khz) {
+			sample_rates->rate.RATE_192 = 0;
+			sample_rates->rate.RATE_176_4 = 0;
+			sample_rates->rate.RATE_96 = 0;
+		}
+		if (limit_freq_to_48_khz) {
+			sample_rates->rate.RATE_192 = 0;
+			sample_rates->rate.RATE_176_4 = 0;
+			sample_rates->rate.RATE_96 = 0;
+			sample_rates->rate.RATE_88_2 = 0;
+		}
+	}
+}
+
+/*For DP SST, calculate if specified sample rates can fit into a given timing */
+/* For DP SST, calculate if specified sample rates can fit into a given
+ * timing.  Intentionally a no-op: DP SST imposes no extra audio bandwidth
+ * restriction here, so *sample_rates is left untouched. */
+static void check_audio_bandwidth_dpsst(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	union audio_sample_rates *sample_rates)
+{
+	/* do nothing */
+}
+
+/*For DP MST, calculate if specified sample rates can fit into a given timing */
+/* For DP MST, calculate if specified sample rates can fit into a given
+ * timing.  Intentionally a no-op, mirroring the SST path. */
+static void check_audio_bandwidth_dpmst(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	union audio_sample_rates *sample_rates)
+{
+	/* do nothing  */
+}
+
+/* Dispatch bandwidth validation to the per-signal-type checker; unknown
+ * signal types leave *sample_rates unmodified. */
+static void check_audio_bandwidth(
+	const struct audio_crtc_info *crtc_info,
+	uint32_t channel_count,
+	enum signal_type signal,
+	union audio_sample_rates *sample_rates)
+{
+	switch (signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		check_audio_bandwidth_hdmi(
+			crtc_info, channel_count, sample_rates);
+		break;
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		check_audio_bandwidth_dpsst(
+			crtc_info, channel_count, sample_rates);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		check_audio_bandwidth_dpmst(
+			crtc_info, channel_count, sample_rates);
+		break;
+	default:
+		break;
+	}
+}
+
+/* expose/not expose HBR capability to Audio driver */
+static void set_high_bit_rate_capable(
+	struct audio *audio,
+	bool capable)
+{
+	uint32_t value = 0;
+
+	/* set high bit rate audio capable*/
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR);
+
+	set_reg_field_value(value, capable,
+		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
+		HBR_CAPABLE);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
+}
+
+/* set video latency in in ms/2+1 */
+static void set_video_latency(
+	struct audio *audio,
+	int latency_in_ms)
+{
+	uint32_t value = 0;
+
+	if ((latency_in_ms < 0) || (latency_in_ms > 255))
+		return;
+
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+	set_reg_field_value(value, latency_in_ms,
+		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		VIDEO_LIPSYNC);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		value);
+}
+
+/* set audio latency in in ms/2+1 */
+static void set_audio_latency(
+	struct audio *audio,
+	int latency_in_ms)
+{
+	uint32_t value = 0;
+
+	if (latency_in_ms < 0)
+		latency_in_ms = 0;
+
+	if (latency_in_ms > 255)
+		latency_in_ms = 255;
+
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+	set_reg_field_value(value, latency_in_ms,
+		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		AUDIO_LIPSYNC);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+		value);
+}
+
+/* Enable the Azalia endpoint: disable its clock gating and set
+ * AUDIO_ENABLED in one write, then read back for the log. */
+void dce_aud_az_enable(struct audio *audio)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+	uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+	set_reg_field_value(value, 1,
+			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			CLOCK_GATING_DISABLE);
+		/* NOTE(review): extra indent is misleading -- this is NOT
+		 * nested under the call above, just a second field update */
+		set_reg_field_value(value, 1,
+			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			AUDIO_ENABLED);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+	dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+			"\n\t========= AUDIO:dce_aud_az_enable: index: %u  data: 0x%x\n",
+			audio->inst, value);
+}
+
+/* Disable the Azalia endpoint in two steps: first clear AUDIO_ENABLED,
+ * then re-enable clock gating (clear CLOCK_GATING_DISABLE) with a second
+ * write; read back for the log. */
+void dce_aud_az_disable(struct audio *audio)
+{
+	uint32_t value;
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+	set_reg_field_value(value, 0,
+		AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+		AUDIO_ENABLED);
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+	set_reg_field_value(value, 0,
+			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			CLOCK_GATING_DISABLE);
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+	dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+			"\n\t========= AUDIO:dce_aud_az_disable: index: %u  data: 0x%x\n",
+			audio->inst, value);
+}
+
+/* Program the full Azalia endpoint configuration for a stream:
+ *  - speaker allocation and HDMI/DP connection type,
+ *  - one audio descriptor per supported format (with bandwidth-limited
+ *    sample rates for LPCM on HDMI),
+ *  - HBR capability based on a 192 kHz / 8-channel bandwidth probe,
+ *  - audio/video lip-sync latencies,
+ *  - sink info verbs (manufacturer/product id, port IDs, and the 18-char
+ *    display name split across SINK_INFO4..8).
+ */
+void dce_aud_az_configure(
+	struct audio *audio,
+	enum signal_type signal,
+	const struct audio_crtc_info *crtc_info,
+	const struct audio_info *audio_info)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	uint32_t speakers = audio_info->flags.info.ALLSPEAKERS;
+	uint32_t value;
+	uint32_t field = 0;
+	enum audio_format_code audio_format_code;
+	uint32_t format_index;
+	uint32_t index;
+	bool is_ac3_supported = false;
+	union audio_sample_rates sample_rate;
+	/* NOTE(review): local shadows the C library name strlen */
+	uint32_t strlen = 0;
+	/* keep endpoint clocks running while we program the verbs below */
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+	set_reg_field_value(value, 1,
+			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			CLOCK_GATING_DISABLE);
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+	/* Speaker Allocation */
+	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+
+	set_reg_field_value(value,
+		speakers,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		SPEAKER_ALLOCATION);
+
+	/* LFE_PLAYBACK_LEVEL = LFEPBL
+	 * LFEPBL = 0 : Unknown or refer to other information
+	 * LFEPBL = 1 : 0dB playback
+	 * LFEPBL = 2 : +10dB playback
+	 * LFE_BL = 3 : Reserved
+	 */
+	set_reg_field_value(value,
+		0,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		LFE_PLAYBACK_LEVEL);
+	/* todo: according to reg spec LFE_PLAYBACK_LEVEL is read only.
+	 *  why are we writing to it?  DCE8 does not write this */
+
+
+	/* clear both connection-type bits, then set the right one for the
+	 * signal below */
+	set_reg_field_value(value,
+		0,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		HDMI_CONNECTION);
+
+	set_reg_field_value(value,
+		0,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		DP_CONNECTION);
+
+	field = get_reg_field_value(value,
+			AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			EXTRA_CONNECTION_INFO);
+
+	field &= ~0x1;
+
+	set_reg_field_value(value,
+		field,
+		AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+		EXTRA_CONNECTION_INFO);
+
+	/* set audio for output signal */
+	switch (signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		set_reg_field_value(value,
+			1,
+			AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			HDMI_CONNECTION);
+
+		break;
+
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		set_reg_field_value(value,
+			1,
+			AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			DP_CONNECTION);
+		break;
+	default:
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);
+
+	/*  Audio Descriptors   */
+	/* pass through all formats; every descriptor register is written,
+	 * with 0 for unsupported formats */
+	for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;
+			format_index++) {
+		audio_format_code =
+			(AUDIO_FORMAT_CODE_FIRST + format_index);
+
+		/* those are unsupported, skip programming */
+		if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO ||
+			audio_format_code == AUDIO_FORMAT_CODE_DST)
+			continue;
+
+		value = 0;
+
+		/* check if supported */
+		if (is_audio_format_supported(
+				audio_info, audio_format_code, &index)) {
+			const struct audio_mode *audio_mode =
+					&audio_info->modes[index];
+			union audio_sample_rates sample_rates =
+					audio_mode->sample_rates;
+			uint8_t byte2 = audio_mode->max_bit_rate;
+
+			/* adjust specific properties */
+			switch (audio_format_code) {
+			case AUDIO_FORMAT_CODE_LINEARPCM: {
+				/* LPCM: prune rates the timing cannot carry
+				 * and report sample size instead of bit rate */
+				check_audio_bandwidth(
+					crtc_info,
+					audio_mode->channel_count,
+					signal,
+					&sample_rates);
+
+				byte2 = audio_mode->sample_size;
+
+				set_reg_field_value(value,
+						sample_rates.all,
+						AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+						SUPPORTED_FREQUENCIES_STEREO);
+				}
+				break;
+			case AUDIO_FORMAT_CODE_AC3:
+				is_ac3_supported = true;
+				break;
+			case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS:
+			case AUDIO_FORMAT_CODE_DTS_HD:
+			case AUDIO_FORMAT_CODE_MAT_MLP:
+			case AUDIO_FORMAT_CODE_DST:
+			case AUDIO_FORMAT_CODE_WMAPRO:
+				byte2 = audio_mode->vendor_specific;
+				break;
+			default:
+				break;
+			}
+
+			/* fill audio format data */
+			set_reg_field_value(value,
+					audio_mode->channel_count - 1,
+					AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+					MAX_CHANNELS);
+
+			set_reg_field_value(value,
+					sample_rates.all,
+					AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+					SUPPORTED_FREQUENCIES);
+
+			set_reg_field_value(value,
+					byte2,
+					AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+					DESCRIPTOR_BYTE_2);
+		} /* if */
+
+		/* descriptor registers are consecutive, indexed by format */
+		AZ_REG_WRITE(
+				AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index,
+				value);
+	} /* for */
+
+	if (is_ac3_supported)
+		/* todo: this reg global.  why program global register? */
+		REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS,
+				0x05);
+
+	/* check for 192khz/8-Ch support for HBR requirements */
+	sample_rate.all = 0;
+	sample_rate.rate.RATE_192 = 1;
+
+	check_audio_bandwidth(
+		crtc_info,
+		8,
+		signal,
+		&sample_rate);
+
+	/* RATE_192 survives only if the timing has the bandwidth */
+	set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192);
+
+	/* Audio and Video Lipsync */
+	set_video_latency(audio, audio_info->video_latency);
+	set_audio_latency(audio, audio_info->audio_latency);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->manufacture_id,
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+		MANUFACTURER_ID);
+
+	set_reg_field_value(value, audio_info->product_id,
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+		PRODUCT_ID);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+		value);
+
+	value = 0;
+
+	/*get display name string length, capped at the HW maximum */
+	while (audio_info->display_name[strlen++] != '\0') {
+		if (strlen >=
+		MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS)
+			break;
+		}
+	set_reg_field_value(value, strlen,
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+		SINK_DESCRIPTION_LEN);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+		value);
+
+	/*
+	*write the port ID:
+	*PORT_ID0 = display index
+	*PORT_ID1 = 16bit BDF
+	*(format MSB->LSB: 8bit Bus, 5bit Device, 3bit Function)
+	*/
+
+	value = 0;
+
+	set_reg_field_value(value, audio_info->port_id[0],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2,
+		PORT_ID0);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->port_id[1],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3,
+		PORT_ID1);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value);
+
+	/*write the 18 char monitor string, 4 chars per SINK_INFO register */
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[0],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION0);
+
+	set_reg_field_value(value, audio_info->display_name[1],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION1);
+
+	set_reg_field_value(value, audio_info->display_name[2],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION2);
+
+	set_reg_field_value(value, audio_info->display_name[3],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+		DESCRIPTION3);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[4],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION4);
+
+	set_reg_field_value(value, audio_info->display_name[5],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION5);
+
+	set_reg_field_value(value, audio_info->display_name[6],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION6);
+
+	set_reg_field_value(value, audio_info->display_name[7],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+		DESCRIPTION7);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[8],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION8);
+
+	set_reg_field_value(value, audio_info->display_name[9],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION9);
+
+	set_reg_field_value(value, audio_info->display_name[10],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION10);
+
+	set_reg_field_value(value, audio_info->display_name[11],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+		DESCRIPTION11);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[12],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION12);
+
+	set_reg_field_value(value, audio_info->display_name[13],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION13);
+
+	set_reg_field_value(value, audio_info->display_name[14],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION14);
+
+	set_reg_field_value(value, audio_info->display_name[15],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+		DESCRIPTION15);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value);
+
+	value = 0;
+	set_reg_field_value(value, audio_info->display_name[16],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+		DESCRIPTION16);
+
+	set_reg_field_value(value, audio_info->display_name[17],
+		AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+		DESCRIPTION17);
+
+	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
+}
+
+/*
+* todo: wall clk related functionality probably belong to clock_src.
+*/
+
+/* search pixel clock value for Azalia HDMI Audio */
+static void get_azalia_clock_info_hdmi(
+	uint32_t crtc_pixel_clock_in_khz,
+	uint32_t actual_pixel_clock_in_khz,
+	struct azalia_clock_info *azalia_clock_info)
+{
+	/* audio_dto_phase= 24 * 10,000;
+	 *   24MHz in [100Hz] units */
+	azalia_clock_info->audio_dto_phase =
+			24 * 10000;
+
+	/* audio_dto_module = PCLKFrequency * 10,000;
+	 *  [khz] -> [100Hz] */
+	azalia_clock_info->audio_dto_module =
+			actual_pixel_clock_in_khz * 10;
+}
+
+/* Compute Azalia DTO phase/module for DP audio, based on the DP DTO source
+ * clock from pll_info.  requested_pixel_clock_in_khz is currently unused. */
+static void get_azalia_clock_info_dp(
+	uint32_t requested_pixel_clock_in_khz,
+	const struct audio_pll_info *pll_info,
+	struct azalia_clock_info *azalia_clock_info)
+{
+	/* Reported dpDtoSourceClockInkhz value for
+	 * DCE8 already adjusted for SS, do not need any
+	 * adjustment here anymore
+	 */
+
+	/*audio_dto_phase = 24 * 10,000;
+	 * 24MHz in [100Hz] units */
+	azalia_clock_info->audio_dto_phase = 24 * 10000;
+
+	/*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
+	 *  [khz] ->[100Hz] */
+	azalia_clock_info->audio_dto_module =
+		pll_info->dp_dto_source_clock_in_khz * 10;
+}
+
+/* Program the audio wall-clock DTO for the stream's signal type: HDMI uses
+ * DTO0 keyed to the master CRTC's pixel clock; everything else (DP) uses
+ * DTO1 driven by the DP DTO source clock.  Source/select fields must be
+ * written before modulo/phase (HW sequencing requirement, see below). */
+void dce_aud_wall_dto_setup(
+	struct audio *audio,
+	enum signal_type signal,
+	const struct audio_crtc_info *crtc_info,
+	const struct audio_pll_info *pll_info)
+{
+	struct dce_audio *aud = DCE_AUD(audio);
+
+	struct azalia_clock_info clock_info = { 0 };
+
+	if (dc_is_hdmi_signal(signal)) {
+		uint32_t src_sel;
+
+		/*DTO0 Programming goal:
+		-generate 24MHz, 128*Fs from 24MHz
+		-use DTO0 when an active HDMI port is connected
+		(optionally a DP is connected) */
+
+		/* calculate DTO settings */
+		get_azalia_clock_info_hdmi(
+			crtc_info->requested_pixel_clock,
+			crtc_info->calculated_pixel_clock,
+			&clock_info);
+
+		dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,\
+				"\n%s:Input::requested_pixel_clock = %d"\
+				"calculated_pixel_clock =%d\n"\
+				"audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
+				crtc_info->requested_pixel_clock,\
+				crtc_info->calculated_pixel_clock,\
+				clock_info.audio_dto_module,\
+				clock_info.audio_dto_phase);
+
+		/* On TN/SI, Program DTO source select and DTO select before
+		programming DTO modulo and DTO phase. These bits must be
+		programmed first, otherwise there will be no HDMI audio at boot
+		up. This is a HW sequence change (different from old ASICs).
+		Caution when changing this programming sequence.
+
+		HDMI enabled, using DTO0
+		program master CRTC for DTO0 */
+		src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
+		REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
+			DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
+			DCCG_AUDIO_DTO_SEL, 0);
+
+		/* module */
+		REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
+			DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
+
+		/* phase */
+		REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
+			DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
+	} else {
+		/*DTO1 Programming goal:
+		-generate 24MHz, 512*Fs, 128*Fs from 24MHz
+		-default is to used DTO1, and switch to DTO0 when an audio
+		master HDMI port is connected
+		-use as default for DP
+
+		calculate DTO settings */
+		get_azalia_clock_info_dp(
+			crtc_info->requested_pixel_clock,
+			pll_info,
+			&clock_info);
+
+		/* Program DTO select before programming DTO modulo and DTO
+		phase. default to use DTO1 */
+
+		REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+				DCCG_AUDIO_DTO_SEL, 1);
+
+		/* NOTE(review): this repeats the DTO_SEL update immediately
+		 * above -- apparently redundant leftover; kept verbatim */
+		REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+			DCCG_AUDIO_DTO_SEL, 1);
+			/* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
+			 * Select 512fs for DP TODO: web register definition
+			 * does not match register header file
+			 * DCE11 version it's commented out while DCE8 it's set to 1
+			*/
+
+		/* module */
+		REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
+				DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
+
+		/* phase */
+		REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
+				DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
+
+		REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+				DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
+
+	}
+}
+
+static bool dce_aud_endpoint_valid(struct audio *audio)
+{
+	uint32_t value;
+	uint32_t port_connectivity;
+
+	value = AZ_REG_READ(
+			AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+
+	port_connectivity = get_reg_field_value(value,
+			AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+			PORT_CONNECTIVITY);
+
+	return !(port_connectivity == 1);
+}
+
/* initialize HW state
 *
 * One-time Azalia endpoint setup: disables endpoint clock gating, advertises
 * the supported sample rates, and sets the power-state keep-alive bits.
 * Only instance 0 performs the programming; the registers are shared, so
 * later instances return immediately.
 */
void dce_aud_hw_init(
		struct audio *audio)
{
	uint32_t value;
	/* NOTE(review): 'aud' looks unused here, but the AZ_REG_*/REG_UPDATE
	 * macros presumably expand to references to it -- confirm against the
	 * macro definitions before removing. */
	struct dce_audio *aud = DCE_AUD(audio);

	/* we only need to program the following registers once, so we only do
	it for the inst 0*/
	if (audio->inst != 0)
		return;

	/* Support R5 - 32khz
	 * Support R6 - 44.1khz
	 * Support R7 - 48khz
	 */
	/*disable clock gating before write to endpoint register*/
	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
	set_reg_field_value(value, 1,
			AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			CLOCK_GATING_DISABLE);
	AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
	/* 0x70 sets bits R5|R6|R7 -- the three rates listed above */
	REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES,
			AUDIO_RATE_CAPABILITIES, 0x70);

	/*Keep alive bit to verify HW block in BU. */
	REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
			CLKSTOP, 1,
			EPSS, 1);
}
+
/* DCE audio vtable wired into the shared 'struct audio' base object by
 * dce_audio_create(); callers invoke these through audio->funcs.
 * (dce_aud_destroy is defined below -- its prototype presumably comes from
 * the corresponding header.) */
static const struct audio_funcs funcs = {
	.endpoint_valid = dce_aud_endpoint_valid,
	.hw_init = dce_aud_hw_init,
	.wall_dto_setup = dce_aud_wall_dto_setup,
	.az_enable = dce_aud_az_enable,
	.az_disable = dce_aud_az_disable,
	.az_configure = dce_aud_az_configure,
	.destroy = dce_aud_destroy,
};
+
+void dce_aud_destroy(struct audio **audio)
+{
+	struct dce_audio *aud = DCE_AUD(*audio);
+
+	kfree(aud);
+	*audio = NULL;
+}
+
+struct audio *dce_audio_create(
+		struct dc_context *ctx,
+		unsigned int inst,
+		const struct dce_audio_registers *reg,
+		const struct dce_audio_shift *shifts,
+		const struct dce_aduio_mask *masks
+		)
+{
+	struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		ASSERT_CRITICAL(audio);
+		return NULL;
+	}
+
+	audio->base.ctx = ctx;
+	audio->base.inst = inst;
+	audio->base.funcs = &funcs;
+
+	audio->regs = reg;
+	audio->shifts = shifts;
+	audio->masks = masks;
+
+	return &audio->base;
+}
+

Some files were not shown because too many files changed in this diff